| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
"""Provides a UPNP discovery method that mimicks Hue hubs."""
import threading
import socket
import logging
import select
from aiohttp import web
from homeassistant import core
from homeassistant.components.http import HomeAssistantView
_LOGGER = logging.getLogger(__name__)
class DescriptionXmlView(HomeAssistantView):
"""Handles requests for the description.xml file."""
url = '/description.xml'
name = 'description:xml'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
@core.callback
def get(self, request):
"""Handle a GET request."""
xml_template = """<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns="urn:schemas-upnp-org:device-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<URLBase>http://{0}:{1}/</URLBase>
<device>
<deviceType>urn:schemas-upnp-org:device:Basic:1</deviceType>
<friendlyName>HASS Bridge ({0})</friendlyName>
<manufacturer>Royal Philips Electronics</manufacturer>
<manufacturerURL>http://www.philips.com</manufacturerURL>
<modelDescription>Philips hue Personal Wireless Lighting</modelDescription>
<modelName>Philips hue bridge 2015</modelName>
<modelNumber>BSB002</modelNumber>
<modelURL>http://www.meethue.com</modelURL>
<serialNumber>1234</serialNumber>
<UDN>uuid:2f402f80-da50-11e1-9b23-001788255acc</UDN>
</device>
</root>
"""
resp_text = xml_template.format(
self.config.advertise_ip, self.config.advertise_port)
return web.Response(text=resp_text, content_type='text/xml')
class UPNPResponderThread(threading.Thread):
"""Handle responding to UPNP/SSDP discovery requests."""
_interrupted = False
def __init__(self, host_ip_addr, listen_port, upnp_bind_multicast,
advertise_ip, advertise_port):
"""Initialize the class."""
threading.Thread.__init__(self)
self.host_ip_addr = host_ip_addr
self.listen_port = listen_port
self.upnp_bind_multicast = upnp_bind_multicast
# Note that the double newline at the end of
# this string is required per the SSDP spec
resp_template = """HTTP/1.1 200 OK
CACHE-CONTROL: max-age=60
EXT:
LOCATION: http://{0}:{1}/description.xml
SERVER: FreeRTOS/6.0.5, UPnP/1.0, IpBridge/0.1
hue-bridgeid: 1234
ST: urn:schemas-upnp-org:device:basic:1
USN: uuid:Socket-1_0-221438K0100073::urn:schemas-upnp-org:device:basic:1
"""
self.upnp_response = resp_template.format(
advertise_ip, advertise_port).replace("\n", "\r\n") \
.encode('utf-8')
def run(self):
"""Run the server."""
# Listen for UDP port 1900 packets sent to SSDP multicast address
ssdp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ssdp_socket.setblocking(False)
# Required for receiving multicast
ssdp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
ssdp_socket.setsockopt(
socket.SOL_IP,
socket.IP_MULTICAST_IF,
socket.inet_aton(self.host_ip_addr))
ssdp_socket.setsockopt(
socket.SOL_IP,
socket.IP_ADD_MEMBERSHIP,
socket.inet_aton("239.255.255.250") +
socket.inet_aton(self.host_ip_addr))
if self.upnp_bind_multicast:
ssdp_socket.bind(("", 1900))
else:
ssdp_socket.bind((self.host_ip_addr, 1900))
while True:
if self._interrupted:
clean_socket_close(ssdp_socket)
return
try:
read, _, _ = select.select(
[ssdp_socket], [],
[ssdp_socket], 2)
if ssdp_socket in read:
data, addr = ssdp_socket.recvfrom(1024)
else:
# most likely the timeout, so check for interrupt
continue
except socket.error as ex:
if self._interrupted:
clean_socket_close(ssdp_socket)
return
_LOGGER.error("UPNP Responder socket exception occured: %s",
ex.__str__)
# without the following continue, a second exception occurs
# because the data object has not been initialized
continue
if "M-SEARCH" in data.decode('utf-8'):
# SSDP M-SEARCH method received, respond to it with our info
resp_socket = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM)
resp_socket.sendto(self.upnp_response, addr)
resp_socket.close()
def stop(self):
"""Stop the server."""
# Request the run loop to exit on its next pass, then wait for it
self._interrupted = True
self.join()
def clean_socket_close(sock):
"""Close a socket connection and logs its closure."""
_LOGGER.info("UPNP responder shutting down.")
sock.close()
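# A minimal sketch of driving the responder thread directly, assuming the
# interface address and ports below; they are placeholders, normally supplied
# by the emulated_hue configuration object rather than hard-coded.
if __name__ == '__main__':
    responder = UPNPResponderThread(
        host_ip_addr='192.168.1.10',   # local interface for multicast membership
        listen_port=8300,              # stored on the thread; not referenced in this file
        upnp_bind_multicast=True,      # bind to ("", 1900) rather than the host IP
        advertise_ip='192.168.1.10',   # IP advertised in the SSDP LOCATION header
        advertise_port=8300)           # port advertised in the SSDP LOCATION header
    responder.start()                  # begin answering M-SEARCH queries on UDP 1900
    try:
        import time
        time.sleep(60)                 # serve discovery responses for a minute
    finally:
        responder.stop()               # set _interrupted and join the thread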
|
{
"content_hash": "3039f6e01fe091716a6c4ee5a25d927c",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 76,
"avg_line_length": 31.848101265822784,
"alnum_prop": 0.6063195548489666,
"repo_name": "LinuxChristian/home-assistant",
"id": "f8d414240649c080bf1495c95ce849dea875f742",
"size": "5032",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/emulated_hue/upnp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13788"
},
{
"name": "HTML",
"bytes": "1733802"
},
{
"name": "JavaScript",
"bytes": "15192"
},
{
"name": "Python",
"bytes": "7415265"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15154"
}
],
"symlink_target": ""
}
|
"""Client for interacting with the Google Cloud DNS API."""
from google.api_core import page_iterator
from google.cloud.client import ClientWithProject
from google.cloud.dns._http import Connection
from google.cloud.dns.zone import ManagedZone
class Client(ClientWithProject):
"""Client to bundle configuration needed for API requests.
:type project: str
:param project: the project which the client acts on behalf of. Will be
passed when creating a zone. If not passed,
falls back to the default inferred from the environment.
:type credentials: :class:`~google.auth.credentials.Credentials`
:param credentials: (Optional) The OAuth2 Credentials to use for this
client. If not passed (and if no ``_http`` object is
passed), falls back to the default inferred from the
environment.
:type _http: :class:`~requests.Session`
:param _http: (Optional) HTTP object to make requests. Can be any object
that defines ``request()`` with the same interface as
:meth:`requests.Session.request`. If not passed, an
``_http`` object is created that is bound to the
``credentials`` for the current object.
This parameter should be considered private, and could
change in the future.
"""
SCOPE = ('https://www.googleapis.com/auth/ndev.clouddns.readwrite',)
"""The scopes required for authenticating as a Cloud DNS consumer."""
def __init__(self, project=None, credentials=None, _http=None):
super(Client, self).__init__(
project=project, credentials=credentials, _http=_http)
self._connection = Connection(self)
def quotas(self):
"""Return DNS quotas for the project associated with this client.
See
https://cloud.google.com/dns/api/v1/projects/get
:rtype: mapping
:returns: keys for the mapping correspond to those of the ``quota``
sub-mapping of the project resource.
"""
path = '/projects/%s' % (self.project,)
resp = self._connection.api_request(method='GET', path=path)
return {key: int(value)
for key, value in resp['quota'].items()
if key != 'kind'}
def list_zones(self, max_results=None, page_token=None):
"""List zones for the project associated with this client.
See
https://cloud.google.com/dns/api/v1/managedZones/list
:type max_results: int
:param max_results: maximum number of zones to return. If not
passed, defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of zones. If
not passed, the API will return the first page of
zones.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.dns.zone.ManagedZone`
belonging to this project.
"""
path = '/projects/%s/managedZones' % (self.project,)
return page_iterator.HTTPIterator(
client=self,
api_request=self._connection.api_request,
path=path,
item_to_value=_item_to_zone,
items_key='managedZones',
page_token=page_token,
max_results=max_results)
def zone(self, name, dns_name=None, description=None):
"""Construct a zone bound to this client.
:type name: str
:param name: Name of the zone.
:type dns_name: str
:param dns_name:
(Optional) DNS name of the zone. If not passed, then calls to
:meth:`zone.create` will fail.
:type description: str
:param description:
(Optional) the description for the zone. If not passed, defaults
to the value of 'dns_name'.
:rtype: :class:`google.cloud.dns.zone.ManagedZone`
:returns: a new ``ManagedZone`` instance.
"""
return ManagedZone(name, dns_name, client=self,
description=description)
def _item_to_zone(iterator, resource):
"""Convert a JSON managed zone to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that has retrieved the item.
:type resource: dict
:param resource: An item to be converted to a managed zone.
:rtype: :class:`.ManagedZone`
:returns: The next managed zone in the page.
"""
return ManagedZone.from_api_repr(resource, iterator.client)
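# A minimal sketch of the client surface documented above, assuming
# application-default credentials and the Cloud DNS API enabled; the project
# ID, zone name and DNS name are placeholders, not values from this module.
if __name__ == '__main__':
    client = Client(project='my-project')
    print(client.quotas())                # e.g. {'managedZones': 10000, ...}
    for zone in client.list_zones(max_results=5):
        print(zone.name)                  # ManagedZone objects, fetched page by page
    new_zone = client.zone('example-zone', dns_name='example.com.')
    new_zone.create()                     # would fail if dns_name were omitted, per zone() above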
|
{
"content_hash": "3ca5d684fcf7c95df3aac07fd66c5615",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 77,
"avg_line_length": 38.6260162601626,
"alnum_prop": 0.6091349189644285,
"repo_name": "jonparrott/gcloud-python",
"id": "386190bab7a0ab719682b89237e4de6a1539b2f3",
"size": "5326",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "dns/google/cloud/dns/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62009"
},
{
"name": "Python",
"bytes": "3459300"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
}
|
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import Article, Author
class CustomColumnsTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(first_name="John", last_name="Smith")
cls.a2 = Author.objects.create(first_name="Peter", last_name="Jones")
cls.authors = [cls.a1, cls.a2]
cls.article = Article.objects.create(headline="Django lets you build web apps easily", primary_author=cls.a1)
cls.article.authors.set(cls.authors)
def test_query_all_available_authors(self):
self.assertQuerysetEqual(
Author.objects.all(), [
"Peter Jones", "John Smith",
],
str
)
def test_get_first_name(self):
self.assertEqual(
Author.objects.get(first_name__exact="John"),
self.a1,
)
def test_filter_first_name(self):
self.assertQuerysetEqual(
Author.objects.filter(first_name__exact="John"), [
"John Smith",
],
str
)
def test_field_error(self):
msg = (
"Cannot resolve keyword 'firstname' into field. Choices are: "
"Author_ID, article, first_name, last_name, primary_set"
)
with self.assertRaisesMessage(FieldError, msg):
Author.objects.filter(firstname__exact="John")
def test_attribute_error(self):
with self.assertRaises(AttributeError):
self.a1.firstname
with self.assertRaises(AttributeError):
self.a1.last
def test_get_all_authors_for_an_article(self):
self.assertQuerysetEqual(
self.article.authors.all(), [
"Peter Jones",
"John Smith",
],
str
)
def test_get_all_articles_for_an_author(self):
self.assertQuerysetEqual(
self.a1.article_set.all(), [
"Django lets you build web apps easily",
],
lambda a: a.headline
)
def test_get_author_m2m_relation(self):
self.assertQuerysetEqual(
self.article.authors.filter(last_name='Jones'), [
"Peter Jones"
],
str
)
def test_author_querying(self):
self.assertSequenceEqual(
Author.objects.all().order_by('last_name'),
[self.a2, self.a1],
)
def test_author_filtering(self):
self.assertSequenceEqual(
Author.objects.filter(first_name__exact='John'),
[self.a1],
)
def test_author_get(self):
self.assertEqual(self.a1, Author.objects.get(first_name__exact='John'))
def test_filter_on_nonexistent_field(self):
msg = (
"Cannot resolve keyword 'firstname' into field. Choices are: "
"Author_ID, article, first_name, last_name, primary_set"
)
with self.assertRaisesMessage(FieldError, msg):
Author.objects.filter(firstname__exact='John')
def test_author_get_attributes(self):
a = Author.objects.get(last_name__exact='Smith')
self.assertEqual('John', a.first_name)
self.assertEqual('Smith', a.last_name)
with self.assertRaisesMessage(AttributeError, "'Author' object has no attribute 'firstname'"):
getattr(a, 'firstname')
with self.assertRaisesMessage(AttributeError, "'Author' object has no attribute 'last'"):
getattr(a, 'last')
def test_m2m_table(self):
self.assertSequenceEqual(
self.article.authors.all().order_by('last_name'),
[self.a2, self.a1],
)
self.assertSequenceEqual(self.a1.article_set.all(), [self.article])
self.assertSequenceEqual(
self.article.authors.filter(last_name='Jones'),
[self.a2],
)
|
{
"content_hash": "dd9b3e10a3a4d0458ac3de66e06da012",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 117,
"avg_line_length": 32.19672131147541,
"alnum_prop": 0.580193482688391,
"repo_name": "ar4s/django",
"id": "20e649de03a4185f3888967c48ee219970ca5359",
"size": "3928",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/custom_columns/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
Tests that check that we ignore the appropriate files when importing courses.
"""
import unittest
from mock import Mock
from xmodule.modulestore.xml_importer import import_static_content
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.tests import DATA_DIR
'''
TODO: Update
class IgnoredFilesTestCase(unittest.TestCase):
"Tests for ignored files"
def test_ignore_tilde_static_files(self):
course_dir = DATA_DIR / "tilde"
course_id = SlashSeparatedCourseKey("edX", "tilde", "Fall_2012")
content_store = Mock()
content_store.generate_thumbnail.return_value = ("content", "location")
import_static_content(course_dir, content_store, course_id)
saved_static_content = [call[0][0] for call in content_store.save.call_args_list]
name_val = {sc.name: sc.data for sc in saved_static_content}
self.assertIn("example.txt", name_val)
self.assertNotIn("example.txt~", name_val)
self.assertIn("GREEN", name_val["example.txt"])
def test_ignore_dot_underscore_static_files(self):
"""
Test for ignored Mac OS metadata files (filename starts with "._")
"""
course_dir = DATA_DIR / "dot-underscore"
course_id = SlashSeparatedCourseKey("edX", "dot-underscore", "2014_Fall")
content_store = Mock()
content_store.generate_thumbnail.return_value = ("content", "location")
import_static_content(course_dir, content_store, course_id)
saved_static_content = [call[0][0] for call in content_store.save.call_args_list]
name_val = {sc.name: sc.data for sc in saved_static_content}
self.assertIn("example.txt", name_val)
self.assertIn(".example.txt", name_val)
self.assertNotIn("._example.txt", name_val)
self.assertNotIn(".DS_Store", name_val)
self.assertIn("GREEN", name_val["example.txt"])
self.assertIn("BLUE", name_val[".example.txt"])
'''
|
{
"content_hash": "89500337e0d234cc84b820da9afe115d",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 89,
"avg_line_length": 45.7906976744186,
"alnum_prop": 0.6678517013712545,
"repo_name": "bmedx/modulestore",
"id": "7f96e95c8d38b945ceef1a61581294ef274446b6",
"size": "1969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xmodule/tests/test_import_static.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "8102"
},
{
"name": "Makefile",
"bytes": "3554"
},
{
"name": "Python",
"bytes": "1355902"
},
{
"name": "Shell",
"bytes": "132"
}
],
"symlink_target": ""
}
|
'''
Created on 2012-10-12
@author: s00228753
'''
from grantee import Grantee
class Grant(object):
#===========================================================================
# Initialize
# @param grantee the grantee being granted access
# @param permission the permission granted
#===========================================================================
def __init__(self, grantee=None, permission=None):
# Avoid a shared mutable default argument; create a fresh Grantee per instance.
self.grantee = grantee if grantee is not None else Grantee()
self.permission = permission
class Permission:
READ = "READ"
WRITE = "WRITE"
READ_ACP = "READ_ACP"
WRITE_ACP = "WRITE_ACP"
FULL_CONTROL = "FULL_CONTROL"
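# A minimal sketch, assuming a default-constructed Grantee is meaningful on its
# own; the permission is just one of the constants defined above.
if __name__ == '__main__':
    grant = Grant(grantee=Grantee(), permission=Permission.FULL_CONTROL)
    print(grant.grantee, grant.permission)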
|
{
"content_hash": "4560acb85deec9b4210408d9d6fe0272",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 21.2,
"alnum_prop": 0.4308176100628931,
"repo_name": "Fangfenghua/docker-registry-driver-huaweimos",
"id": "874707841f2ce6ba893134c5aa2f0896727480cf",
"size": "668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "com/hws/s3/models/grant.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "81612"
}
],
"symlink_target": ""
}
|
"""Support for MyQ gateways."""
from pymyq.const import (
DEVICE_FAMILY as MYQ_DEVICE_FAMILY,
DEVICE_FAMILY_GATEWAY as MYQ_DEVICE_FAMILY_GATEWAY,
DEVICE_STATE as MYQ_DEVICE_STATE,
DEVICE_STATE_ONLINE as MYQ_DEVICE_STATE_ONLINE,
KNOWN_MODELS,
MANUFACTURER,
)
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
BinarySensorEntity,
)
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, MYQ_COORDINATOR, MYQ_GATEWAY
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up mysq covers."""
data = hass.data[DOMAIN][config_entry.entry_id]
myq = data[MYQ_GATEWAY]
coordinator = data[MYQ_COORDINATOR]
entities = []
for device in myq.devices.values():
if device.device_json[MYQ_DEVICE_FAMILY] == MYQ_DEVICE_FAMILY_GATEWAY:
entities.append(MyQBinarySensorEntity(coordinator, device))
async_add_entities(entities, True)
class MyQBinarySensorEntity(CoordinatorEntity, BinarySensorEntity):
"""Representation of a MyQ gateway."""
def __init__(self, coordinator, device):
"""Initialize with API object, device id."""
super().__init__(coordinator)
self._device = device
@property
def device_class(self):
"""We track connectivity for gateways."""
return DEVICE_CLASS_CONNECTIVITY
@property
def name(self):
"""Return the name of the garage door if any."""
return f"{self._device.name} MyQ Gateway"
@property
def is_on(self):
"""Return if the device is online."""
if not self.coordinator.last_update_success:
return False
# Not all devices report online, so assume True if it's missing
return self._device.device_json[MYQ_DEVICE_STATE].get(
MYQ_DEVICE_STATE_ONLINE, True
)
@property
def available(self) -> bool:
"""Entity is always available."""
return True
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return self._device.device_id
@property
def device_info(self):
"""Return the device_info of the device."""
device_info = {
"identifiers": {(DOMAIN, self._device.device_id)},
"name": self.name,
"manufacturer": MANUFACTURER,
"sw_version": self._device.firmware_version,
}
model = KNOWN_MODELS.get(self._device.device_id[2:4])
if model:
device_info["model"] = model
return device_info
|
{
"content_hash": "4866ed0cbf40399985378459b7b92061",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 82,
"avg_line_length": 30.298850574712645,
"alnum_prop": 0.6426403641881638,
"repo_name": "soldag/home-assistant",
"id": "57bd2451d2ab2cd517c8f1b6c21b81210d79bca5",
"size": "2636",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/myq/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19025087"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
"""Configuration.from_env() tests."""
from pytest import mark, raises
def test(config):
config.from_env("CONFIG_TEST_ENV")
assert config() == "test-value"
def test_with_children(config):
config.section1.value1.from_env("CONFIG_TEST_ENV")
assert config() == {"section1": {"value1": "test-value"}}
assert config.section1() == {"value1": "test-value"}
assert config.section1.value1() == "test-value"
def test_default(config):
config.from_env("UNDEFINED_ENV", "default-value")
assert config() == "default-value"
def test_default_none(config):
config.from_env("UNDEFINED_ENV")
assert config() is None
def test_option_default_none(config):
config.option.from_env("UNDEFINED_ENV")
assert config.option() is None
@mark.parametrize("config_type", ["strict"])
def test_undefined_in_strict_mode(config):
with raises(ValueError):
config.from_env("UNDEFINED_ENV")
@mark.parametrize("config_type", ["strict"])
def test_option_undefined_in_strict_mode(config):
with raises(ValueError):
config.option.from_env("UNDEFINED_ENV")
def test_undefined_in_strict_mode_with_default(config):
config.from_env("UNDEFINED_ENV", "default-value")
assert config() == "default-value"
@mark.parametrize("config_type", ["strict"])
def test_option_undefined_in_strict_mode_with_default(config):
config.option.from_env("UNDEFINED_ENV", "default-value")
assert config.option() == "default-value"
def test_required_undefined(config):
with raises(ValueError):
config.from_env("UNDEFINED_ENV", required=True)
def test_required_undefined_with_default(config):
config.from_env("UNDEFINED_ENV", default="default-value", required=True)
assert config() == "default-value"
def test_option_required_undefined(config):
with raises(ValueError):
config.option.from_env("UNDEFINED_ENV", required=True)
def test_option_required_undefined_with_default(config):
config.option.from_env("UNDEFINED_ENV", default="default-value", required=True)
assert config.option() == "default-value"
@mark.parametrize("config_type", ["strict"])
def test_not_required_undefined_in_strict_mode(config):
config.from_env("UNDEFINED_ENV", required=False)
assert config() is None
@mark.parametrize("config_type", ["strict"])
def test_option_not_required_undefined_in_strict_mode(config):
config.option.from_env("UNDEFINED_ENV", required=False)
assert config.option() is None
@mark.parametrize("config_type", ["strict"])
def test_not_required_undefined_with_default_in_strict_mode(config):
config.from_env("UNDEFINED_ENV", default="default-value", required=False)
assert config() == "default-value"
@mark.parametrize("config_type", ["strict"])
def test_option_not_required_undefined_with_default_in_strict_mode(config):
config.option.from_env("UNDEFINED_ENV", default="default-value", required=False)
assert config.option() == "default-value"
|
{
"content_hash": "0e45f4d72873ba963ad4a1573324a68d",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 84,
"avg_line_length": 30.224489795918366,
"alnum_prop": 0.7052667116812964,
"repo_name": "rmk135/dependency_injector",
"id": "4c9a70db5bed75096933027980d3e71a60df2f6f",
"size": "2962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/providers/configuration/test_from_env_py2_py3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "171241"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['MovingMedian'] , ['Seasonal_WeekOfYear'] , ['MLP'] );
|
{
"content_hash": "2805bcdfcc6870582c7e927dcf3157d5",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 93,
"avg_line_length": 41.5,
"alnum_prop": 0.7228915662650602,
"repo_name": "antoinecarme/pyaf",
"id": "0bdd8296f20d71dd5a3cb82ce43b32a418524f70",
"size": "166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_MovingMedian_Seasonal_WeekOfYear_MLP.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import distutils.spawn
import importlib
import multiprocessing
import os
import shutil
import sys
import types
import traceback
from datetime import datetime
import attributes
from lib import utilities
class Attribute(object):
def __init__(self, attribute, **goptions):
self.name = attribute.get('name', '')
self.initial = attribute.get('initial', '').lower()
self.weight = attribute.get('weight', 0.0)
self.enabled = attribute.get('enabled', True)
self.requires_source = attribute.get('requires_source', False)
self.essential = attribute.get('essential', False)
self.persist = attribute.get('persist', True)
self.dependencies = attribute.get('dependencies', list())
self.options = goptions
self.options.update(attribute.get('options', dict()))
self.reference = importlib.import_module('{0}.main'.format(self.name))
def run(self, project_id, repository_path, cursor, outq):
result = self.reference.run(
project_id, repository_path, cursor, **self.options
)
outq.put(result)
@property
def timeout(self):
return self.options.get('timeout', None)
def __getstate__(self):
state = self.__dict__.copy()
if isinstance(self.reference, types.ModuleType):
state['reference'] = self.reference.__name__
return state
def __setstate__(self, state):
self.__dict__.update(state)
if isinstance(self.reference, str):
self.reference = importlib.import_module(
'{0}.main'.format(self.name)
)
class Attributes(object):
def __init__(
self, attributes, database, cleanup=False, keystring=None, **goptions
):
self.attributes = None
self.database = database
self.today = goptions.get('today', str(datetime.today().date()))
self.cleanup = cleanup
self._parse_attributes(attributes, **goptions)
self._parse_keystring(keystring)
def global_init(self, samples):
try:
if not self._validate_dependencies():
raise Exception(
'Missing dependencies must be installed to continue.'
)
self.database.connect()
for attribute in self.attributes:
if hasattr(attribute.reference, 'global_init'):
with self.database.cursor() as cursor:
attribute.reference.global_init(cursor, samples)
finally:
self.database.disconnect()
def run(self, project_id, repository_root):
rresults = dict()
repository_home = os.path.join(repository_root, str(project_id))
outq = multiprocessing.Queue(maxsize=1)
try:
self.database.connect()
repository_path = None
if self.requires_source:
repository_path = self._init_repository(
project_id, repository_home
)
for attribute in self.attributes:
bresult = False
rresult = None
if not attribute.enabled:
continue
with self.database.cursor() as cursor:
if hasattr(attribute.reference, 'init'):
attribute.reference.init(cursor)
with self.database.cursor() as cursor:
timeout = utilities.parse_datetime_delta(attribute.timeout)
process = multiprocessing.Process(
target=attribute.run,
args=(project_id, repository_path, cursor, outq)
)
process.start()
process.join(timeout=timeout.total_seconds())
if not outq.empty():
(bresult, rresult) = outq.get()
else:
sys.stderr.write(
(
' \033[91mWARNING\033[0m [{0:10d}] '
'{1} timed out\n'
).format(project_id, attribute.name)
)
if process.is_alive():
process.terminate()
rresults[attribute.name] = rresult
except Exception:
sys.stderr.write('Exception\n\n')
sys.stderr.write(' Project ID {0}\n'.format(project_id))
extype, exvalue, extrace = sys.exc_info()
traceback.print_exception(extype, exvalue, extrace)
finally:
self.database.disconnect()
if self.cleanup:
self._cleanup(repository_home)
return rresults
def get(self, name):
for attribute in self.attributes:
if attribute.name == name:
return attribute
def score(self, rresults):
score = 0
for (attribute, rresult) in rresults.items():
attribute = self.get(attribute)
bresult = False
if type(rresult) is not str and rresult is not None:
if 'threshold' in attribute.options:
bresult = (rresult >= attribute.options['threshold'])
else:
bresult = bool(rresult)
# If an *essential* attribute is missing a ZERO score is assigned
if attribute.essential and bresult is False:
score = 0
break
score += bresult * attribute.weight
return score
@property
def is_persistence_enabled(self):
for attribute in self.attributes:
if attribute.persist:
return True
return False
@property
def requires_source(self):
for attribute in self.attributes:
if attribute.enabled and attribute.requires_source:
return True
return False
def _cleanup(self, repository_home):
shutil.rmtree(repository_home, ignore_errors=True)
def _init_repository(self, project_id, repository_home):
repository_path = repository_home # Default
if not os.path.exists(repository_path):
os.mkdir(repository_path)
items = os.listdir(repository_path)
if items:
for item in os.listdir(repository_path):
itempath = os.path.join(repository_path, item)
if os.path.isdir(itempath):
repository_path = itempath
break
else:
(repo_owner, repo_name) = self.database.get(
'''
SELECT u.login, p.name
FROM projects p
JOIN users u ON u.id = p.owner_id
WHERE p.id = {0}
'''.format(project_id)
)
if not (repo_owner or repo_name):
raise ValueError('Invalid project ID {0}.'.format(project_id))
last_commit_date = self.database.get(
'''
SELECT DATE(c.created_at)
FROM project_commits pc
JOIN commits c ON c.id = pc.commit_id
WHERE pc.project_id = {0}
ORDER BY c.created_at DESC
LIMIT 1
'''.format(project_id)
)
if last_commit_date is None:
last_commit_date = self.today
repository_path = utilities.clone(
repo_owner, repo_name, repository_path, last_commit_date
)
return repository_path
def _parse_attributes(self, attributes, **goptions):
if attributes:
self.attributes = list()
for attribute in attributes:
self.attributes.append(Attribute(attribute, **goptions))
def _disable_attributes(self):
for attribute in self.attributes:
attribute.enabled = False
def _disable_persistence(self):
for attribute in self.attributes:
attribute.persist = False
def _parse_keystring(self, keystring):
if keystring:
# Clean the slate
self._disable_attributes()
self._disable_persistence()
for key in keystring:
attribute = next(
attribute
for attribute in self.attributes
if attribute.initial == key.lower()
)
attribute.enabled = True
attribute.persist = key.isupper()
def _validate_dependencies(self):
valid = True
for attribute in self.attributes:
if attribute.enabled and attribute.dependencies:
for dependency in attribute.dependencies:
if not distutils.spawn.find_executable(dependency):
sys.stderr.write(
'[{0}] Dependency {1} missing\n'.format(
attribute.name, dependency
)
)
valid = False
return valid
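# A hedged sketch of how Attributes.score() combines per-attribute results; the
# attribute names, weights and threshold below are purely illustrative (each
# name would normally correspond to an importable attribute module), and the
# database/goptions plumbing is omitted, so this is shown as comments rather
# than runnable code.
#
#   attrs = Attributes(
#       [
#           {'name': 'license', 'initial': 'l', 'weight': 5, 'essential': True},
#           {'name': 'unit_test', 'initial': 'u', 'weight': 10,
#            'options': {'threshold': 0.5}},
#       ],
#       database=database,
#   )
#   attrs.score({'license': 1, 'unit_test': 0.7})     # 1 * 5 + (0.7 >= 0.5) * 10 == 15
#   attrs.score({'license': None, 'unit_test': 0.7})  # essential result missing -> 0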
|
{
"content_hash": "a7ebe06f1ce8059720a3a4d4b5ed95c3",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 79,
"avg_line_length": 34.15185185185185,
"alnum_prop": 0.5294436612081119,
"repo_name": "RepoReapers/reaper",
"id": "221e40f1b080559379f985b99d2c2bacb2c03e52",
"size": "9221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/attributes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "133"
},
{
"name": "JavaScript",
"bytes": "392"
},
{
"name": "Objective-C",
"bytes": "59"
},
{
"name": "Python",
"bytes": "145851"
},
{
"name": "Shell",
"bytes": "4599"
}
],
"symlink_target": ""
}
|
"""Test the cbuildbot_run module."""
import logging
import os
import cPickle
import sys
import time
sys.path.insert(0, os.path.abspath('%s/../..' % os.path.dirname(__file__)))
from chromite.cbuildbot import cbuildbot_config
from chromite.cbuildbot import cbuildbot_run
from chromite.lib import cros_test_lib
from chromite.lib import parallel
import mock
DEFAULT_ARCHIVE_GS_PATH = 'bogus_bucket/TheArchiveBase'
DEFAULT_ARCHIVE_BASE = 'gs://%s' % DEFAULT_ARCHIVE_GS_PATH
DEFAULT_BUILDROOT = '/tmp/foo/bar/buildroot'
DEFAULT_BUILDNUMBER = 12345
DEFAULT_BRANCH = 'TheBranch'
DEFAULT_CHROME_BRANCH = 'TheChromeBranch'
DEFAULT_VERSION_STRING = 'TheVersionString'
DEFAULT_BOARD = 'TheBoard'
DEFAULT_BOT_NAME = 'TheCoolBot'
# Access to protected member.
# pylint: disable=W0212
DEFAULT_OPTIONS = cros_test_lib.EasyAttr(
archive_base=DEFAULT_ARCHIVE_BASE,
buildroot=DEFAULT_BUILDROOT,
buildnumber=DEFAULT_BUILDNUMBER,
buildbot=True,
branch=DEFAULT_BRANCH,
remote_trybot=False,
debug=False,
postsync_patch=True,
)
DEFAULT_CONFIG = cbuildbot_config._config(
name=DEFAULT_BOT_NAME,
master=True,
boards=[DEFAULT_BOARD],
postsync_patch=True,
child_configs=[cbuildbot_config._config(name='foo', postsync_patch=False,
boards=[]),
cbuildbot_config._config(name='bar', postsync_patch=False,
boards=[]),
],
)
DEFAULT_VERSION = '6543.2.1'
def _ExtendDefaultOptions(**kwargs):
"""Extend DEFAULT_OPTIONS with keys/values in kwargs."""
options_kwargs = DEFAULT_OPTIONS.copy()
options_kwargs.update(kwargs)
return cros_test_lib.EasyAttr(**options_kwargs)
def _ExtendDefaultConfig(**kwargs):
"""Extend DEFAULT_CONFIG with keys/values in kwargs."""
config_kwargs = DEFAULT_CONFIG.copy()
config_kwargs.update(kwargs)
return cbuildbot_config._config(**config_kwargs)
class ExceptionsTest(cros_test_lib.TestCase):
"""Test that the exceptions in the module are sane."""
def _TestException(self, err, expected_startswith):
"""Test that str and pickle behavior of |err| are as expected."""
err2 = cPickle.loads(cPickle.dumps(err, cPickle.HIGHEST_PROTOCOL))
self.assertTrue(str(err).startswith(expected_startswith))
self.assertEqual(str(err), str(err2))
def testParallelAttributeError(self):
"""Test ParallelAttributeError message and pickle behavior."""
err1 = cbuildbot_run.ParallelAttributeError('SomeAttr')
self._TestException(err1, 'No such parallel run attribute')
err2 = cbuildbot_run.ParallelAttributeError('SomeAttr', 'SomeBoard',
'SomeTarget')
self._TestException(err2, 'No such board-specific parallel run attribute')
def testAttrSepCountError(self):
"""Test AttrSepCountError message and pickle behavior."""
err1 = cbuildbot_run.AttrSepCountError('SomeAttr')
self._TestException(err1, 'Attribute name has an unexpected number')
def testAttrNotPickleableError(self):
"""Test AttrNotPickleableError message and pickle behavior."""
err1 = cbuildbot_run.AttrNotPickleableError('SomeAttr', 'SomeValue')
self._TestException(err1, 'Run attribute "SomeAttr" value cannot')
# TODO(mtennant): Turn this into a PartialMock.
class _BuilderRunTestCase(cros_test_lib.MockTestCase):
"""Provide methods for creating BuilderRun or ChildBuilderRun."""
def setUp(self):
self._manager = parallel.Manager()
# Mimic entering a 'with' statement.
self._manager.__enter__()
def tearDown(self):
# Mimic exiting a 'with' statement.
self._manager.__exit__(None, None, None)
def _NewRunAttributes(self):
return cbuildbot_run.RunAttributes(self._manager)
def _NewBuilderRun(self, options=None, config=None):
"""Create a BuilderRun objection from options and config values.
Args:
options: Specify options or default to DEFAULT_OPTIONS.
config: Specify build config or default to DEFAULT_CONFIG.
Returns:
BuilderRun object.
"""
options = options or DEFAULT_OPTIONS
config = config or DEFAULT_CONFIG
return cbuildbot_run.BuilderRun(options, config, self._manager)
def _NewChildBuilderRun(self, child_index, options=None, config=None):
"""Create a ChildBuilderRun objection from options and config values.
Args:
child_index: Index of child config to use within config.
options: Specify options or default to DEFAULT_OPTIONS.
config: Specify build config or default to DEFAULT_CONFIG.
Returns:
ChildBuilderRun object.
"""
run = self._NewBuilderRun(options, config)
return cbuildbot_run.ChildBuilderRun(run, child_index)
class BuilderRunPickleTest(_BuilderRunTestCase):
"""Make sure BuilderRun objects can be pickled."""
def setUp(self):
self.real_config = cbuildbot_config.config['x86-alex-release-group']
self.PatchObject(cbuildbot_run._BuilderRunBase, 'GetVersion',
return_value=DEFAULT_VERSION)
def _TestPickle(self, run1):
self.assertEquals(DEFAULT_VERSION, run1.GetVersion())
run1.attrs.release_tag = 'TheReleaseTag'
# Accessing a method on BuilderRun has special behavior, so access and
# use one before pickling.
patch_after_sync = run1.ShouldPatchAfterSync()
# Access the archive object before pickling, too.
upload_url = run1.GetArchive().upload_url
# Pickle and unpickle run1 into run2.
run2 = cPickle.loads(cPickle.dumps(run1, cPickle.HIGHEST_PROTOCOL))
self.assertEquals(run1.buildnumber, run2.buildnumber)
self.assertEquals(run1.config.boards, run2.config.boards)
self.assertEquals(run1.options.branch, run2.options.branch)
self.assertEquals(run1.attrs.release_tag, run2.attrs.release_tag)
self.assertRaises(AttributeError, getattr, run1.attrs, 'manifest_manager')
self.assertRaises(AttributeError, getattr, run2.attrs, 'manifest_manager')
self.assertEquals(patch_after_sync, run2.ShouldPatchAfterSync())
self.assertEquals(upload_url, run2.GetArchive().upload_url)
# The attrs objects should be identical.
self.assertTrue(run1.attrs is run2.attrs)
# And the run objects themselves are different.
self.assertFalse(run1 is run2)
def testPickleBuilderRun(self):
self._TestPickle(self._NewBuilderRun(config=self.real_config))
def testPickleChildBuilderRun(self):
self._TestPickle(self._NewChildBuilderRun(0, config=self.real_config))
class BuilderRunTest(_BuilderRunTestCase):
"""Test the BuilderRun class."""
def testInit(self):
with mock.patch.object(cbuildbot_run._BuilderRunBase, 'GetVersion') as m:
m.return_value = DEFAULT_VERSION
run = self._NewBuilderRun()
self.assertEquals(DEFAULT_BUILDROOT, run.buildroot)
self.assertEquals(DEFAULT_BUILDNUMBER, run.buildnumber)
self.assertEquals(DEFAULT_BRANCH, run.manifest_branch)
self.assertEquals(DEFAULT_OPTIONS, run.options)
self.assertEquals(DEFAULT_CONFIG, run.config)
self.assertTrue(isinstance(run.attrs, cbuildbot_run.RunAttributes))
self.assertTrue(isinstance(run.GetArchive(),
cbuildbot_run.archive_lib.Archive))
# Make sure methods behave normally, since BuilderRun messes with them.
meth1 = run.GetVersionInfo
meth2 = run.GetVersionInfo
self.assertEqual(meth1.__name__, meth2.__name__)
# We actually do not support identity and equality checks right now.
self.assertNotEqual(meth1, meth2)
self.assertFalse(meth1 is meth2)
def testOptions(self):
options = _ExtendDefaultOptions(foo=True, bar=10)
run = self._NewBuilderRun(options=options)
self.assertEquals(True, run.options.foo)
self.assertEquals(10, run.options.__getattr__('bar'))
self.assertRaises(AttributeError, run.options.__getattr__, 'baz')
def testConfig(self):
config = _ExtendDefaultConfig(foo=True, bar=10)
run = self._NewBuilderRun(config=config)
self.assertEquals(True, run.config.foo)
self.assertEquals(10, run.config.__getattr__('bar'))
self.assertRaises(AttributeError, run.config.__getattr__, 'baz')
def testAttrs(self):
run = self._NewBuilderRun()
# manifest_manager is a valid run attribute. It gives AttributeError
# if accessed before being set, but thereafter works fine.
self.assertRaises(AttributeError, run.attrs.__getattribute__,
'manifest_manager')
run.attrs.manifest_manager = 'foo'
self.assertEquals('foo', run.attrs.manifest_manager)
self.assertEquals('foo', run.attrs.__getattribute__('manifest_manager'))
# foobar is not a valid run attribute. It gives AttributeError when
# accessed or changed.
self.assertRaises(AttributeError, run.attrs.__getattribute__, 'foobar')
self.assertRaises(AttributeError, run.attrs.__setattr__, 'foobar', 'foo')
def testArchive(self):
run = self._NewBuilderRun()
with mock.patch.object(cbuildbot_run._BuilderRunBase, 'GetVersion') as m:
m.return_value = DEFAULT_VERSION
archive = run.GetArchive()
# Check archive.archive_path.
expected = ('%s/%s/%s/%s' %
(DEFAULT_BUILDROOT,
cbuildbot_run.archive_lib.Archive._BUILDBOT_ARCHIVE,
DEFAULT_BOT_NAME, DEFAULT_VERSION))
self.assertEqual(expected, archive.archive_path)
# Check archive.upload_url.
expected = '%s/%s/%s' % (DEFAULT_ARCHIVE_BASE, DEFAULT_BOT_NAME,
DEFAULT_VERSION)
self.assertEqual(expected, archive.upload_url)
# Check archive.download_url.
expected = ('%s%s/%s/%s' %
(cbuildbot_run.archive_lib.gs.PRIVATE_BASE_HTTPS_URL,
DEFAULT_ARCHIVE_GS_PATH, DEFAULT_BOT_NAME, DEFAULT_VERSION))
self.assertEqual(expected, archive.download_url)
def _RunAccessor(self, method_name, options_dict, config_dict):
"""Run the given accessor method of the BuilderRun class.
Create a BuilderRun object with the options and config provided and
then return the result of calling the given method on it.
Args:
method_name: A BuilderRun method to call, specified by name.
options_dict: Extend default options with this.
config_dict: Extend default config with this.
Returns:
Result of calling the given method.
"""
options = _ExtendDefaultOptions(**options_dict)
config = _ExtendDefaultConfig(**config_dict)
run = self._NewBuilderRun(options=options, config=config)
method = getattr(run, method_name)
self.assertEqual(method.__name__, method_name)
return method()
def testDualEnableSetting(self):
settings = {
'prebuilts': 'ShouldUploadPrebuilts',
'postsync_patch': 'ShouldPatchAfterSync',
}
# Both option and config enabled should result in True.
# Create truth table with three variables in this order:
# <key> option value, <key> config value (e.g. <key> == 'prebuilts').
truth_table = cros_test_lib.TruthTable(inputs=[(True, True)])
for inputs in truth_table:
option_val, config_val = inputs
for key, accessor in settings.iteritems():
self.assertEquals(
self._RunAccessor(accessor, {key: option_val}, {key: config_val}),
truth_table.GetOutput(inputs))
def testShouldReexecAfterSync(self):
# If option and config have postsync_reexec enabled, and this file is not
# in the build root, then we expect ShouldReexecAfterSync to return True.
# Construct a truth table across three variables in this order:
# postsync_reexec option value, postsync_reexec config value, same_root.
truth_table = cros_test_lib.TruthTable(inputs=[(True, True, False)])
for inputs in truth_table:
option_val, config_val, same_root = inputs
if same_root:
build_root = os.path.dirname(os.path.dirname(__file__))
else:
build_root = DEFAULT_BUILDROOT
result = self._RunAccessor(
'ShouldReexecAfterSync',
{'postsync_reexec': option_val, 'buildroot': build_root},
{'postsync_reexec': config_val})
self.assertEquals(result, truth_table.GetOutput(inputs))
class GetVersionTest(_BuilderRunTestCase):
"""Test the GetVersion and GetVersionInfo methods of BuilderRun class."""
# Access to protected member.
# pylint: disable=W0212
def testGetVersionInfo(self):
verinfo = object()
with mock.patch('cbuildbot_run.manifest_version.VersionInfo.from_repo',
return_value=verinfo) as m:
result = cbuildbot_run._BuilderRunBase.GetVersionInfo(DEFAULT_BUILDROOT)
self.assertEquals(result, verinfo)
m.assert_called_once_with(DEFAULT_BUILDROOT)
def _TestGetVersionReleaseTag(self, release_tag):
with mock.patch.object(cbuildbot_run._BuilderRunBase,
'GetVersionInfo') as m:
verinfo_mock = mock.Mock()
verinfo_mock.chrome_branch = DEFAULT_CHROME_BRANCH
verinfo_mock.VersionString = mock.Mock(return_value='VS')
m.return_value = verinfo_mock
# Prepare a real BuilderRun object with a release tag.
run = self._NewBuilderRun()
run.attrs.release_tag = release_tag
# Run the test return the result.
result = run.GetVersion()
m.assert_called_once_with(DEFAULT_BUILDROOT)
if release_tag is None:
verinfo_mock.VersionString.assert_called_once()
return result
def testGetVersionReleaseTag(self):
result = self._TestGetVersionReleaseTag('RT')
self.assertEquals('R%s-%s' % (DEFAULT_CHROME_BRANCH, 'RT'), result)
def testGetVersionNoReleaseTag(self):
result = self._TestGetVersionReleaseTag(None)
expected_result = ('R%s-%s-b%s' %
(DEFAULT_CHROME_BRANCH, 'VS', DEFAULT_BUILDNUMBER))
self.assertEquals(result, expected_result)
class ChildBuilderRunTest(_BuilderRunTestCase):
"""Test the ChildBuilderRun class"""
def testInit(self):
with mock.patch.object(cbuildbot_run._BuilderRunBase, 'GetVersion') as m:
m.return_value = DEFAULT_VERSION
crun = self._NewChildBuilderRun(0)
self.assertEquals(DEFAULT_BUILDROOT, crun.buildroot)
self.assertEquals(DEFAULT_BUILDNUMBER, crun.buildnumber)
self.assertEquals(DEFAULT_BRANCH, crun.manifest_branch)
self.assertEquals(DEFAULT_OPTIONS, crun.options)
self.assertEquals(DEFAULT_CONFIG.child_configs[0], crun.config)
self.assertEquals('foo', crun.config.name)
self.assertTrue(isinstance(crun.attrs, cbuildbot_run.RunAttributes))
self.assertTrue(isinstance(crun.GetArchive(),
cbuildbot_run.archive_lib.Archive))
# Make sure methods behave normally, since BuilderRun messes with them.
meth1 = crun.GetVersionInfo
meth2 = crun.GetVersionInfo
self.assertEqual(meth1.__name__, meth2.__name__)
# We actually do not support identity and equality checks right now.
self.assertNotEqual(meth1, meth2)
self.assertFalse(meth1 is meth2)
class RunAttributesTest(_BuilderRunTestCase):
"""Test the RunAttributes class."""
BOARD = 'SomeBoard'
TARGET = 'SomeConfigName'
VALUE = 'AnyValueWillDo'
# Any valid board-specific attribute will work here.
BATTR = 'breakpad_symbols_generated'
UNIQUIFIED_BATTR = cbuildbot_run.RunAttributes._GetBoardAttrName(
BATTR, BOARD, TARGET)
def testRegisterBoardTarget(self):
"""Test behavior of attributes before and after registering board target."""
ra = self._NewRunAttributes()
self.assertFalse(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
self.assertFalse(ra.HasParallel(self.UNIQUIFIED_BATTR))
ra.RegisterBoardAttrs(self.BOARD, self.TARGET)
self.assertFalse(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
self.assertFalse(ra.HasParallel(self.UNIQUIFIED_BATTR))
ra.SetBoardParallel(self.BATTR, 'TheValue', self.BOARD, self.TARGET)
self.assertTrue(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
self.assertTrue(ra.HasParallel(self.UNIQUIFIED_BATTR))
def testSetGet(self):
"""Test simple set/get of regular and parallel run attributes."""
ra = self._NewRunAttributes()
value = 'foobar'
# Set/Get a regular run attribute using direct access.
ra.release_tag = value
self.assertEqual(value, ra.release_tag)
# Set/Get of a parallel run attribute using direct access fails.
self.assertRaises(AttributeError, setattr, ra, 'unittest_value', value)
self.assertRaises(AttributeError, getattr, ra, 'unittest_value')
# Set/Get of a parallel run attribute with supported interface.
ra.SetParallel('unittest_value', value)
self.assertEqual(value, ra.GetParallel('unittest_value'))
# Set/Get a board parallel run attribute, testing both the encouraged
# interface and the underlying interface.
ra.RegisterBoardAttrs(self.BOARD, self.TARGET)
ra.SetBoardParallel(self.BATTR, value, self.BOARD, self.TARGET)
self.assertEqual(value,
ra.GetBoardParallel(self.BATTR, self.BOARD, self.TARGET))
self.assertEqual(value,
ra.GetParallel(self.UNIQUIFIED_BATTR))
def testSetDefault(self):
"""Test setting default value of parallel run attributes."""
ra = self._NewRunAttributes()
value = 'foobar'
# Attribute starts off not set.
self.assertFalse(ra.HasParallel('unittest_value'))
# Use SetParallelDefault to set it.
ra.SetParallelDefault('unittest_value', value)
self.assertTrue(ra.HasParallel('unittest_value'))
self.assertEqual(value, ra.GetParallel('unittest_value'))
# Calling SetParallelDefault again has no effect.
ra.SetParallelDefault('unittest_value', 'junk')
self.assertTrue(ra.HasParallel('unittest_value'))
self.assertEqual(value, ra.GetParallel('unittest_value'))
# Run through same sequence for a board-specific attribute.
# Attribute starts off not set.
self.assertFalse(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
ra.RegisterBoardAttrs(self.BOARD, self.TARGET)
self.assertFalse(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
# Use SetBoardParallelDefault to set it.
ra.SetBoardParallelDefault(self.BATTR, value, self.BOARD, self.TARGET)
self.assertTrue(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
self.assertEqual(value,
ra.GetBoardParallel(self.BATTR, self.BOARD, self.TARGET))
# Calling SetBoardParallelDefault again has no effect.
ra.SetBoardParallelDefault(self.BATTR, 'junk', self.BOARD, self.TARGET)
self.assertTrue(ra.HasBoardParallel(self.BATTR, self.BOARD, self.TARGET))
self.assertEqual(value,
ra.GetBoardParallel(self.BATTR, self.BOARD, self.TARGET))
def testAttributeError(self):
"""Test accessing run attributes that do not exist."""
ra = self._NewRunAttributes()
value = 'foobar'
# Set/Get on made up attribute name.
self.assertRaises(AttributeError, setattr, ra, 'foo', value)
self.assertRaises(AttributeError, getattr, ra, 'foo')
# self.UNIQUIFIED_BATTR is valid, but only if board/target registered first.
self.assertRaises(AttributeError, ra.GetBoardParallel,
self.BATTR, self.BOARD, self.TARGET)
self.assertRaises(AttributeError, ra.SetParallel,
self.UNIQUIFIED_BATTR, value)
self.assertRaises(AttributeError, ra.GetParallel, self.UNIQUIFIED_BATTR)
class BoardRunAttributesTest(_BuilderRunTestCase):
"""Test the BoardRunAttributes class."""
BOARD = 'SomeBoard'
TARGET = 'SomeConfigName'
VALUE = 'AnyValueWillDo'
# Any valid board-specific attribute will work here.
BATTR = 'breakpad_symbols_generated'
class _SetAttr(object):
"""Stage-like class to set attr on a BoardRunAttributes obj."""
def __init__(self, bra, attr, value, delay=1):
self.bra = bra
self.attr = attr
self.value = value
self.delay = delay
def Run(self):
if self.delay:
time.sleep(self.delay)
self.bra.SetParallel(self.attr, self.value)
class _WaitForAttr(object):
"""Stage-like class to wait for attr on BoardRunAttributes obj."""
def __init__(self, bra, attr, expected_value, timeout=10):
self.bra = bra
self.attr = attr
self.expected_value = expected_value
self.timeout = timeout
def GetParallel(self):
return self.bra.GetParallel(self.attr, timeout=self.timeout)
class _CheckWaitForAttr(_WaitForAttr):
"""Stage-like class to wait for then check attr on BoardRunAttributes."""
def Run(self):
value = self.GetParallel()
assert value == self.expected_value, \
('For run attribute %s expected value %r but got %r.' %
(self.attr, self.expected_value, value))
class _TimeoutWaitForAttr(_WaitForAttr):
"""Stage-like class to time-out waiting for attr on BoardRunAttributes."""
def Run(self):
try:
self.GetParallel()
assert False, 'Expected AttrTimeoutError'
except cbuildbot_run.AttrTimeoutError:
pass
def setUp(self):
self.ra = self._NewRunAttributes()
self.bra = self.ra.RegisterBoardAttrs(self.BOARD, self.TARGET)
def _TestParallelSetGet(self, stage_args):
"""Helper to run "stages" in parallel, according to |stage_args|.
Args:
stage_args: List of tuples of the form (stage_object, extra_args, ...)
where stage_object has a Run method which takes a BoardRunAttributes
object as the first argument and extra_args for the remaining arguments.
"""
stages = [a[0](self.bra, *a[1:]) for a in stage_args]
steps = [stage.Run for stage in stages]
parallel.RunParallelSteps(steps)
def testParallelSetGetFast(self):
"""Pass the parallel run attribute around with no delay."""
stage_args = [
(self._CheckWaitForAttr, self.BATTR, self.VALUE),
(self._SetAttr, self.BATTR, self.VALUE),
]
self._TestParallelSetGet(stage_args)
self.assertRaises(AttributeError,
getattr, self.bra, self.BATTR)
self.assertEqual(self.VALUE, self.bra.GetParallel(self.BATTR))
def testParallelSetGetSlow(self):
"""Pass the parallel run attribute around with a delay."""
stage_args = [
(self._SetAttr, self.BATTR, self.VALUE, 10),
(self._TimeoutWaitForAttr, self.BATTR, self.VALUE, 2),
]
self._TestParallelSetGet(stage_args)
self.assertEqual(self.VALUE, self.bra.GetParallel(self.BATTR))
def testParallelSetGetManyGets(self):
"""Set the parallel run attribute in one stage, access in many stages."""
stage_args = [
(self._SetAttr, self.BATTR, self.VALUE, 8),
(self._CheckWaitForAttr, self.BATTR, self.VALUE, 16),
(self._CheckWaitForAttr, self.BATTR, self.VALUE, 16),
(self._CheckWaitForAttr, self.BATTR, self.VALUE, 16),
(self._TimeoutWaitForAttr, self.BATTR, self.VALUE, 1),
]
self._TestParallelSetGet(stage_args)
self.assertEqual(self.VALUE, self.bra.GetParallel(self.BATTR))
def testParallelSetGetManySets(self):
"""Set the parallel run attribute in many stages, access in one stage."""
# Three "stages" set the value, with increasing delays. The stage that
# checks the value should get the first value set.
stage_args = [
(self._SetAttr, self.BATTR, self.VALUE + '1', 1),
(self._SetAttr, self.BATTR, self.VALUE + '2', 11),
(self._CheckWaitForAttr, self.BATTR, self.VALUE + '1', 12),
]
self._TestParallelSetGet(stage_args)
self.assertEqual(self.VALUE + '2', self.bra.GetParallel(self.BATTR))
def testSetGet(self):
"""Test that board-specific attrs do not work with set/get directly."""
self.assertRaises(AttributeError, setattr,
self.bra, 'breakpad_symbols_generated', self.VALUE)
self.assertRaises(AttributeError, getattr,
self.bra, 'breakpad_symbols_generated')
def testAccessRegularRunAttr(self):
"""Test that regular attributes are not known to BoardRunAttributes."""
self.assertRaises(AttributeError, getattr, self.bra, 'release_tag')
self.assertRaises(AttributeError, setattr, self.bra, 'release_tag', 'foo')
if __name__ == '__main__':
cros_test_lib.main(level=logging.DEBUG)
|
{
"content_hash": "c17b0c0971d3f2e73b83f513c1da598c",
"timestamp": "",
"source": "github",
"line_count": 646,
"max_line_length": 80,
"avg_line_length": 37.77554179566563,
"alnum_prop": 0.6922509527517109,
"repo_name": "chadversary/chromiumos.chromite",
"id": "970f8e4216e164f25ab26efee58def954d790f2c",
"size": "24592",
"binary": false,
"copies": "1",
"ref": "refs/heads/fix-repo-mirror",
"path": "cbuildbot/cbuildbot_run_unittest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85"
},
{
"name": "Python",
"bytes": "3652882"
},
{
"name": "Shell",
"bytes": "24031"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from operator import itemgetter
import six
from pyga import utils
from pyga import exceptions
__author__ = "Arun KR (kra3) <the1.arun@gmail.com>"
__license__ = "Simplified BSD"
class Campaign(object):
'''
A representation of Campaign
Properties:
_type -- See TYPE_* constants, will be mapped to "__utmz" parameter.
creation_time -- Time of the creation of this campaign, will be mapped to "__utmz" parameter.
response_count -- Response Count, will be mapped to "__utmz" parameter.
Is also used to determine whether the campaign is new or repeated,
which will be mapped to "utmcn" and "utmcr" parameters.
id -- Campaign ID, a.k.a. "utm_id" query parameter for ga.js
Will be mapped to "__utmz" parameter.
source -- Source, a.k.a. "utm_source" query parameter for ga.js.
Will be mapped to "utmcsr" key in "__utmz" parameter.
g_click_id -- Google AdWords Click ID, a.k.a. "gclid" query parameter for ga.js.
Will be mapped to "utmgclid" key in "__utmz" parameter.
d_click_id -- DoubleClick (?) Click ID. Will be mapped to "utmdclid" key in "__utmz" parameter.
name -- Name, a.k.a. "utm_campaign" query parameter for ga.js.
Will be mapped to "utmccn" key in "__utmz" parameter.
medium -- Medium, a.k.a. "utm_medium" query parameter for ga.js.
Will be mapped to "utmcmd" key in "__utmz" parameter.
term -- Terms/Keywords, a.k.a. "utm_term" query parameter for ga.js.
Will be mapped to "utmctr" key in "__utmz" parameter.
content -- Ad Content Description, a.k.a. "utm_content" query parameter for ga.js.
Will be mapped to "utmcct" key in "__utmz" parameter.
'''
TYPE_DIRECT = 'direct'
TYPE_ORGANIC = 'organic'
TYPE_REFERRAL = 'referral'
CAMPAIGN_DELIMITER = '|'
UTMZ_PARAM_MAP = {
'utmcid': 'id',
'utmcsr': 'source',
'utmgclid': 'g_click_id',
'utmdclid': 'd_click_id',
'utmccn': 'name',
'utmcmd': 'medium',
'utmctr': 'term',
'utmcct': 'content',
}
def __init__(self, typ):
self._type = None
self.creation_time = None
self.response_count = 0
self.id = None
self.source = None
self.g_click_id = None
self.d_click_id = None
self.name = None
self.medium = None
self.term = None
self.content = None
if typ:
if typ not in ('direct', 'organic', 'referral'):
raise ValueError('Campaign type has to be one of the Campaign::TYPE_* constant values.')
self._type = typ
if typ == Campaign.TYPE_DIRECT:
self.name = '(direct)'
self.source = '(direct)'
self.medium = '(none)'
elif typ == Campaign.TYPE_REFERRAL:
self.name = '(referral)'
self.medium = 'referral'
elif typ == Campaign.TYPE_ORGANIC:
self.name = '(organic)'
self.medium = 'organic'
else:
self._type = None
self.creation_time = datetime.utcnow()
def validate(self):
if not self.source:
raise exceptions.ValidationError('Campaigns need to have at least the "source" attribute defined.')
@staticmethod
def create_from_referrer(url):
obj = Campaign(Campaign.TYPE_REFERRAL)
parse_rslt = six.moves.urllib.parse.urlparse(url)
obj.source = parse_rslt.netloc
obj.content = parse_rslt.path
return obj
def extract_from_utmz(self, utmz):
parts = utmz.split('.', 4)
if len(parts) != 5:
raise ValueError('The given "__utmz" cookie value is invalid.')
self.creation_time = utils.convert_ga_timestamp(parts[1])
self.response_count = int(parts[3])
params = parts[4].split(Campaign.CAMPAIGN_DELIMITER)
for param in params:
key, val = param.split('=')
try:
setattr(self, self.UTMZ_PARAM_MAP[key], six.moves.urllib.parse.unquote_plus(val))
except KeyError:
continue
return self
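# A hedged sketch of parsing a "__utmz" cookie with extract_from_utmz(); the
# cookie value below is made up, following the dot-separated layout the method
# expects (five fields, with the campaign key=value pairs last).
#
#   utmz = ('12345.1338971160.1.1.'
#           'utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=python')
#   campaign = Campaign(Campaign.TYPE_ORGANIC).extract_from_utmz(utmz)
#   campaign.source          # 'google'
#   campaign.medium          # 'organic'
#   campaign.term            # 'python'
#   campaign.response_count  # 1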
class CustomVariable(object):
'''
Represent a Custom Variable
Properties:
index -- Is the slot, you have 5 slots
name -- Name given to custom variable
value -- Value for the variable
scope -- Scope can be any one of 1, 2 or 3.
WATCH OUT: It's a known issue that GA will not decode URL-encoded
characters in custom variable names and values properly, so spaces
will show up as "%20" in the interface etc. (applicable to name & value)
http://www.google.com/support/forum/p/Google%20Analytics/thread?tid=2cdb3ec0be32e078
'''
SCOPE_VISITOR = 1
SCOPE_SESSION = 2
SCOPE_PAGE = 3
def __init__(self, index=None, name=None, value=None, scope=3):
self.index = index
self.name = name
self.value = value
self.scope = CustomVariable.SCOPE_PAGE
if scope:
self.scope = scope
def __setattr__(self, name, value):
if name == 'scope':
if value and value not in range(1, 4):
raise ValueError('Custom Variable scope has to be one of 1, 2 or 3.')
if name == 'index':
# Custom Variables are limited to five slots officially, but there seems to be a
# trick to allow for more of them which we could investigate at a later time (see
# http://analyticsimpact.com/2010/05/24/get-more-than-5-custom-variables-in-google-analytics/
if value and (value < 0 or value > 5):
raise ValueError('Custom Variable index has to be between 1 and 5.')
object.__setattr__(self, name, value)
def validate(self):
'''
According to the GA documentation, there is a limit to the combined size of
name and value of 64 bytes after URL encoding,
see http://code.google.com/apis/analytics/docs/tracking/gaTrackingCustomVariables.html#varTypes
and http://xahlee.org/js/google_analytics_tracker_2010-07-01_expanded.js line 563
This limit was increased to 128 bytes BEFORE encoding with the 2012-01 release of ga.js however,
see http://code.google.com/apis/analytics/community/gajs_changelog.html
'''
if len('%s%s' % (self.name, self.value)) > 128:
raise exceptions.ValidationError('Custom Variable combined name and value length must not be larger than 128 bytes.')
class Event(object):
'''
Represents an Event
https://developers.google.com/analytics/devguides/collection/gajs/eventTrackerGuide
Properties:
category -- The general event category
action -- The action for the event
label -- An optional descriptor for the event
value -- An optional value associated with the event. You can see your
event values in the Overview, Categories, and Actions reports,
where they are listed by event or aggregated across events,
depending upon your report view.
noninteraction -- By default, event hits will impact a visitor's bounce rate.
By setting this parameter to true, this event hit
will not be used in bounce rate calculations.
(default False)
'''
def __init__(self, category=None, action=None, label=None, value=None, noninteraction=False):
self.category = category
self.action = action
self.label = label
self.value = value
self.noninteraction = bool(noninteraction)
if self.noninteraction and not self.value:
self.value = 0
def validate(self):
if not(self.category and self.action):
raise exceptions.ValidationError('Events need to have at least a category and an action defined.')
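# Illustrative usage sketch (values assumed): a non-interaction event that should not
# affect bounce rate calculations
#   event = Event(category='video', action='play', label='intro', value=30, noninteraction=True)
#   event.validate()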
class Item(object):
'''
Represents an Item in Transaction
Properties:
order_id -- Order ID, will be mapped to "utmtid" parameter
sku -- Product Code. This is the sku code for a given product, will be mapped to "utmipc" parameter
name -- Product Name, will be mapped to "utmipn" parameter
variation -- Variations on an item, will be mapped to "utmiva" parameter
price -- Unit Price. Value is set to numbers only, will be mapped to "utmipr" parameter
quantity -- Unit Quantity, will be mapped to "utmiqt" parameter
'''
def __init__(self):
self.order_id = None
self.sku = None
self.name = None
self.variation = None
self.price = None
self.quantity = 1
def validate(self):
if not self.sku:
raise exceptions.ValidationError('sku/product is a required parameter')
class Page(object):
'''
Contains all parameters needed for tracking a page
Properties:
path -- Page request URI, will be mapped to "utmp" parameter
title -- Page title, will be mapped to "utmdt" parameter
charset -- Charset encoding, will be mapped to "utmcs" parameter
referrer -- Referer URL, will be mapped to "utmr" parameter
load_time -- Page load time in milliseconds, will be encoded into "utme" parameter.
'''
REFERRER_INTERNAL = '0'
def __init__(self, path):
self.path = None
self.title = None
self.charset = None
self.referrer = None
self.load_time = None
if path:
self.path = path
def __setattr__(self, name, value):
if name == 'path':
if value and value != '':
if value[0] != '/':
raise ValueError('The page path should always start with a slash ("/").')
elif name == 'load_time':
if value and not isinstance(value, int):
raise ValueError('Page load time must be specified in integer milliseconds.')
object.__setattr__(self, name, value)
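# Illustrative usage sketch (values assumed): the path must start with a slash and
# load_time must be an integer number of milliseconds
#   page = Page('/products/widget')
#   page.title = 'Widget details'
#   page.load_time = 1250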
class Session(object):
'''
You should serialize this object and store it in the user session to keep it
persistent between requests (similar to the "__utmb" cookie of the GA Javascript client).
Properties:
session_id -- A unique per-session ID, will be mapped to "utmhid" parameter
track_count -- The amount of pageviews that were tracked within this session so far,
will be part of the "__utmb" cookie parameter.
Will get incremented automatically upon each request
start_time -- Timestamp of the start of this new session, will be part of the "__utmb" cookie parameter
'''
def __init__(self):
self.session_id = utils.get_32bit_random_num()
self.track_count = 0
self.start_time = datetime.utcnow()
@staticmethod
def generate_session_id():
return utils.get_32bit_random_num()
def extract_from_utmb(self, utmb):
'''
Will extract information for the "trackCount" and "startTime"
properties from the given "__utmb" cookie value.
'''
parts = utmb.split('.')
if len(parts) != 4:
raise ValueError('The given "__utmb" cookie value is invalid.')
self.track_count = int(parts[1])
self.start_time = utils.convert_ga_timestamp(parts[3])
return self
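# Illustrative sketch (value assumed): a "__utmb" cookie value has four dot-separated
# fields; only the second (track count) and the fourth (session start timestamp) are read here, e.g.
#   session = Session().extract_from_utmb('12345.3.10.1331585972')
# leaves session.track_count == 3 and session.start_time set from the trailing timestamp.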
class SocialInteraction(object):
'''
Properties:
action -- Required. A string representing the social action being tracked,
will be mapped to "utmsa" parameter
network -- Required. A string representing the social network being tracked,
will be mapped to "utmsn" parameter
target -- Optional. A string representing the URL (or resource) which receives the action.
'''
def __init__(self, action=None, network=None, target=None):
self.action = action
self.network = network
self.target = target
def validate(self):
if not(self.action and self.network):
raise exceptions.ValidationError('Social interactions need to have at least the "network" and "action" attributes defined.')
class Transaction(object):
'''
Represents parameters for a Transaction call
Properties:
order_id -- Order ID, will be mapped to "utmtid" parameter
affiliation -- Affiliation, Will be mapped to "utmtst" parameter
total -- Total Cost, will be mapped to "utmtto" parameter
tax -- Tax Cost, will be mapped to "utmttx" parameter
shipping -- Shipping Cost, values as for unit and price, will be mapped to "utmtsp" parameter
city -- Billing City, will be mapped to "utmtci" parameter
state -- Billing Region, will be mapped to "utmtrg" parameter
country -- Billing Country, will be mapped to "utmtco" parameter
items -- @entity.Items in a transaction
'''
def __init__(self):
self.items = []
self.order_id = None
self.affiliation = None
self.total = None
self.tax = None
self.shipping = None
self.city = None
self.state = None
self.country = None
def __setattr__(self, name, value):
if name == 'order_id':
for itm in self.items:
itm.order_id = value
object.__setattr__(self, name, value)
def validate(self):
if len(self.items) == 0:
raise exceptions.ValidationError('Transactions need to consist of at least one item')
def add_item(self, item):
''' item of type entities.Item '''
if isinstance(item, Item):
item.order_id = self.order_id
self.items.append(item)
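# Illustrative usage sketch (values assumed): items inherit the transaction's order ID
#   txn = Transaction()
#   txn.order_id = 'ORDER-1'
#   item = Item()
#   item.sku = 'SKU-42'
#   item.price = 9.99
#   txn.add_item(item)
#   txn.validate()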
class Visitor(object):
'''
You should serialize this object and store it in the user database to keep it
persistent for the same user permanently (similar to the "__utma" cookie of
the GA Javascript client).
Properties:
unique_id -- Unique user ID, will be part of the "__utma" cookie parameter
first_visit_time -- Time of the very first visit of this user, will be part of the "__utma" cookie parameter
previous_visit_time -- Time of the previous visit of this user, will be part of the "__utma" cookie parameter
current_visit_time -- Time of the current visit of this user, will be part of the "__utma" cookie parameter
visit_count -- Amount of total visits by this user, will be part of the "__utma" cookie parameter
ip_address -- IP Address of the end user, will be mapped to "utmip" parameter and "X-Forwarded-For" request header
user_agent -- User agent string of the end user, will be mapped to "User-Agent" request header
locale -- Locale string (country part optional) will be mapped to "utmul" parameter
flash_version -- Visitor's Flash version, will be mapped to "utmfl" parameter
java_enabled -- Visitor's Java support, will be mapped to "utmje" parameter
screen_colour_depth -- Visitor's screen color depth, will be mapped to "utmsc" parameter
screen_resolution -- Visitor's screen resolution, will be mapped to "utmsr" parameter
'''
def __init__(self):
now = datetime.utcnow()
self.unique_id = None
self.first_visit_time = now
self.previous_visit_time = now
self.current_visit_time = now
self.visit_count = 1
self.ip_address = None
self.user_agent = None
self.locale = None
self.flash_version = None
self.java_enabled = None
self.screen_colour_depth = None
self.screen_resolution = None
def __setattr__(self, name, value):
if name == 'unique_id':
if value and (value < 0 or value > 0x7fffffff):
raise ValueError('Visitor unique ID has to be a 32-bit integer between 0 and 0x7fffffff')
object.__setattr__(self, name, value)
def __getattribute__(self, name):
if name == 'unique_id':
tmp = object.__getattribute__(self, name)
if tmp is None:
self.unique_id = self.generate_unique_id()
return object.__getattribute__(self, name)
def __getstate__(self):
state = self.__dict__
# make sure a unique ID gets serialized even if it was never accessed
if state.get('unique_id') is None:
state['unique_id'] = self.generate_unique_id()
return state
def extract_from_utma(self, utma):
'''
Will extract information for the "unique_id", "first_visit_time", "previous_visit_time",
"current_visit_time" and "visit_count" properties from the given "__utma" cookie value.
'''
parts = utma.split('.')
if len(parts) != 6:
raise ValueError('The given "__utma" cookie value is invalid.')
self.unique_id = int(parts[1])
self.first_visit_time = utils.convert_ga_timestamp(parts[2])
self.previous_visit_time = utils.convert_ga_timestamp(parts[3])
self.current_visit_time = utils.convert_ga_timestamp(parts[4])
self.visit_count = int(parts[5])
return self
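# Illustrative sketch (value assumed): a "__utma" cookie has six dot-separated fields,
# "<domain-hash>.<unique-id>.<first-visit>.<previous-visit>.<current-visit>.<visit-count>", e.g.
#   visitor = Visitor().extract_from_utma('12345.1122334455.1331585972.1331585972.1331585972.2')
# which sets visitor.unique_id == 1122334455 and visitor.visit_count == 2.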
def extract_from_server_meta(self, meta):
'''
Will extract information for the "ip_address", "user_agent" and "locale"
properties from the given WSGI REQUEST META variable or equivalent.
'''
if 'REMOTE_ADDR' in meta and meta['REMOTE_ADDR']:
ip = None
for key in ('HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR'):
if key in meta and not ip:
ips = meta.get(key, '').split(',')
ip = ips[-1].strip()
if not utils.is_valid_ip(ip):
ip = ''
if utils.is_private_ip(ip):
ip = ''
if ip:
self.ip_address = ip
if 'HTTP_USER_AGENT' in meta and meta['HTTP_USER_AGENT']:
self.user_agent = meta['HTTP_USER_AGENT']
if 'HTTP_ACCEPT_LANGUAGE' in meta and meta['HTTP_ACCEPT_LANGUAGE']:
user_locals = []
matched_locales = utils.validate_locale(meta['HTTP_ACCEPT_LANGUAGE'])
if matched_locales:
lang_lst = map((lambda x: x.replace('-', '_')), (i[1] for i in matched_locales))
quality_lst = map((lambda x: x and x or 1), (float(i[4] and i[4] or '0') for i in matched_locales))
lang_quality_map = map((lambda x, y: (x, y)), lang_lst, quality_lst)
user_locals = [x[0] for x in sorted(lang_quality_map, key=itemgetter(1), reverse=True)]
if user_locals:
self.locale = user_locals[0]
return self
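# Illustrative sketch (values assumed): any WSGI-style environ/META mapping works here
#   meta = {
#       'REMOTE_ADDR': '203.0.113.5',
#       'HTTP_USER_AGENT': 'Mozilla/5.0',
#       'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8',
#   }
#   visitor = Visitor().extract_from_server_meta(meta)
# which fills ip_address, user_agent and locale from the request headers.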
def generate_hash(self):
'''Generates a hashed value from user-specific properties.'''
tmpstr = "%s%s%s" % (self.user_agent, self.screen_resolution, self.screen_colour_depth)
return utils.generate_hash(tmpstr)
def generate_unique_id(self):
'''Generates a unique user ID from the current user-specific properties.'''
return ((utils.get_32bit_random_num() ^ self.generate_hash()) & 0x7fffffff)
def add_session(self, session):
'''
Updates the "previousVisitTime", "currentVisitTime" and "visitCount"
fields based on the given session object.
'''
start_time = session.start_time
if start_time != self.current_visit_time:
self.previous_visit_time = self.current_visit_time
self.current_visit_time = start_time
self.visit_count = self.visit_count + 1
|
{
"content_hash": "042e1064238b19663981df7f1b2f7c78",
"timestamp": "",
"source": "github",
"line_count": 504,
"max_line_length": 136,
"avg_line_length": 38.51190476190476,
"alnum_prop": 0.6116434827408552,
"repo_name": "pannal/Subliminal.bundle",
"id": "130c8db4627bade149bf8928cb02f0042d460bcc",
"size": "19435",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Contents/Libraries/Shared/pyga/entities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3012769"
},
{
"name": "Python",
"bytes": "3311785"
},
{
"name": "Shell",
"bytes": "273"
}
],
"symlink_target": ""
}
|
"""Requirements specific to SQLAlchemy's own unit tests.
"""
import sys
from sqlalchemy import exc
from sqlalchemy import util
from sqlalchemy.sql import text
from sqlalchemy.testing import exclusions
from sqlalchemy.testing.exclusions import against
from sqlalchemy.testing.exclusions import fails_if
from sqlalchemy.testing.exclusions import fails_on
from sqlalchemy.testing.exclusions import fails_on_everything_except
from sqlalchemy.testing.exclusions import LambdaPredicate
from sqlalchemy.testing.exclusions import NotPredicate
from sqlalchemy.testing.exclusions import only_if
from sqlalchemy.testing.exclusions import only_on
from sqlalchemy.testing.exclusions import skip_if
from sqlalchemy.testing.exclusions import SpecPredicate
from sqlalchemy.testing.exclusions import succeeds_if
from sqlalchemy.testing.requirements import SuiteRequirements
def no_support(db, reason):
return SpecPredicate(db, description=reason)
def exclude(db, op, spec, description=None):
return SpecPredicate(db, op, spec, description=description)
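# Usage sketch: these two helpers only wrap SpecPredicate so the requirement properties
# below read declaratively, e.g. skip_if(no_support("sqlite", "not supported by database"))
# or exclude("mysql", "<", (5, 0, 3), "savepoints not supported") inside a skip_if([...]) list.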
class DefaultRequirements(SuiteRequirements):
@property
def deferrable_or_no_constraints(self):
"""Target database must support deferrable constraints."""
return skip_if(
[
no_support("firebird", "not supported by database"),
no_support("mysql", "not supported by database"),
no_support("mssql", "not supported by database"),
]
)
@property
def check_constraints(self):
"""Target database must support check constraints."""
return exclusions.open()
@property
def enforces_check_constraints(self):
"""Target database must also enforce check constraints."""
return self.check_constraints + fails_on(
self._mysql_check_constraints_dont_exist,
"check constraints don't enforce on MySQL, MariaDB<10.2",
)
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def implicitly_named_constraints(self):
"""target database must apply names to unnamed constraints."""
return skip_if([no_support("sqlite", "not supported by database")])
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return skip_if(no_support("sqlite", "not supported by database"))
@property
def on_update_cascade(self):
"""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return skip_if(
["sqlite", "oracle"],
"target backend %(doesnt_support)s ON UPDATE CASCADE",
)
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return fails_on_everything_except("sqlite", "oracle") + skip_if(
"mssql"
)
@property
def recursive_fk_cascade(self):
"""target database must support ON DELETE CASCADE on a self-referential
foreign key"""
return skip_if(["mssql"])
@property
def deferrable_fks(self):
"""target database must support deferrable fks"""
return only_on(["oracle"])
@property
def foreign_key_constraint_option_reflection_ondelete(self):
return only_on(["postgresql", "mysql", "sqlite", "oracle"])
@property
def fk_constraint_option_reflection_ondelete_restrict(self):
return only_on(["postgresql", "sqlite", self._mysql_80])
@property
def fk_constraint_option_reflection_ondelete_noaction(self):
return only_on(["postgresql", "mysql", "sqlite"])
@property
def foreign_key_constraint_option_reflection_onupdate(self):
return only_on(["postgresql", "mysql", "sqlite"])
@property
def fk_constraint_option_reflection_onupdate_restrict(self):
return only_on(["postgresql", "sqlite", self._mysql_80])
@property
def comment_reflection(self):
return only_on(["postgresql", "mysql", "oracle"])
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return skip_if(
["firebird", "oracle", "mysql"], "not supported by database"
)
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return skip_if(
[
no_support("firebird", "not supported by database"),
no_support("oracle", "not supported by database"),
no_support("mssql", "not supported by database"),
no_support("sybase", "not supported by database"),
]
)
@property
def non_native_boolean_unconstrained(self):
"""target database is not native boolean and allows arbitrary integers
in it's "bool" column"""
return skip_if(
[
LambdaPredicate(
lambda config: against(config, "mssql"),
"SQL Server drivers / odbc seem to change "
"their mind on this",
),
LambdaPredicate(
lambda config: config.db.dialect.supports_native_boolean,
"native boolean dialect",
),
]
)
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return skip_if(["firebird", "mssql+mxodbc"], "not supported by driver")
@property
def qmark_paramstyle(self):
return only_on(
["firebird", "sqlite", "+pyodbc", "+mxodbc", "mysql+oursql"]
)
@property
def named_paramstyle(self):
return only_on(["sqlite", "oracle+cx_oracle"])
@property
def format_paramstyle(self):
return only_on(
[
"mysql+mysqldb",
"mysql+pymysql",
"mysql+cymysql",
"mysql+mysqlconnector",
"postgresql",
]
)
@property
def pyformat_paramstyle(self):
return only_on(
[
"postgresql+psycopg2",
"postgresql+psycopg2cffi",
"postgresql+pypostgresql",
"postgresql+pygresql",
"mysql+mysqlconnector",
"mysql+pymysql",
"mysql+cymysql",
"mssql+pymssql",
]
)
@property
def no_quoting_special_bind_names(self):
"""Target database will quote bound parameter names, doesn't support
EXPANDING"""
return skip_if(["oracle"])
@property
def identity(self):
"""Target database must support GENERATED AS IDENTITY or a facsimile.
Includes GENERATED AS IDENTITY, AUTOINCREMENT, AUTO_INCREMENT, or other
column DDL feature that fills in a DB-generated identifier at
INSERT-time without requiring pre-execution of a SEQUENCE or other
artifact.
"""
return skip_if(
["firebird", "oracle", "postgresql", "sybase"],
"not supported by database",
)
@property
def temporary_tables(self):
"""target database supports temporary tables"""
return skip_if(
["mssql", "firebird", self._sqlite_file_db], "not supported (?)"
)
@property
def temp_table_reflection(self):
return self.temporary_tables
@property
def reflectable_autoincrement(self):
"""Target database must support tables that can automatically generate
PKs assuming they were reflected.
this is essentially all the DBs in "identity" plus PostgreSQL, which
has SERIAL support. FB and Oracle (and sybase?) require the Sequence
to be explicitly added, including if the table was reflected.
"""
return skip_if(
["firebird", "oracle", "sybase"], "not supported by database"
)
@property
def insert_from_select(self):
return skip_if(["firebird"], "crashes for unknown reason")
@property
def fetch_rows_post_commit(self):
return skip_if(["firebird"], "not supported")
@property
def non_broken_binary(self):
"""target DBAPI must work fully with binary values"""
# see https://github.com/pymssql/pymssql/issues/504
return skip_if(["mssql+pymssql"])
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return skip_if(["oracle", "mssql"], "not supported by database/driver")
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
# adding mssql here since it doesn't support comparisons either,
# have observed generally bad behavior with binary / mssql.
return skip_if(["oracle", "mssql"], "not supported by database/driver")
@property
def tuple_in(self):
def _sqlite_tuple_in(config):
return against(
config, "sqlite"
) and config.db.dialect.dbapi.sqlite_version_info >= (3, 15, 0)
return only_on(["mysql", "postgresql", _sqlite_tuple_in])
@property
def independent_cursors(self):
"""Target must support simultaneous, independent database cursors
on a single connection."""
return skip_if(["mssql", "mysql"], "no driver support")
@property
def independent_connections(self):
"""
Target must support simultaneous, independent database connections.
"""
# This is also true of some configurations of UnixODBC and probably
# win32 ODBC as well.
return skip_if(
[
no_support(
"sqlite",
"independent connections disabled "
"when :memory: connections are used",
),
exclude(
"mssql",
"<",
(9, 0, 0),
"SQL Server 2005+ is required for "
"independent connections",
),
]
)
@property
def memory_process_intensive(self):
"""Driver is able to handle the memory tests which run in a subprocess
and iterate through hundreds of connections
"""
return skip_if(
[
no_support("oracle", "Oracle XE usually can't handle these"),
no_support("mssql+pyodbc", "MS ODBC drivers struggle"),
self._running_on_windows(),
]
)
@property
def updateable_autoincrement_pks(self):
"""Target must support UPDATE on autoincrement/integer primary key."""
return skip_if(
["mssql", "sybase"], "IDENTITY columns can't be updated"
)
@property
def isolation_level(self):
return only_on(
("postgresql", "sqlite", "mysql", "mssql", "oracle"),
"DBAPI has no isolation level support",
) + fails_on(
"postgresql+pypostgresql",
"pypostgresql bombs on multiple isolation level calls",
)
def get_isolation_levels(self, config):
levels = set(config.db.dialect._isolation_lookup)
if against(config, "sqlite"):
default = "SERIALIZABLE"
levels.add("AUTOCOMMIT")
elif against(config, "postgresql"):
default = "READ COMMITTED"
levels.add("AUTOCOMMIT")
elif against(config, "mysql"):
default = "REPEATABLE READ"
levels.add("AUTOCOMMIT")
elif against(config, "mssql"):
default = "READ COMMITTED"
levels.add("AUTOCOMMIT")
elif against(config, "oracle"):
default = "READ COMMITTED"
levels.add("AUTOCOMMIT")
else:
raise NotImplementedError()
return {"default": default, "supported": levels}
@property
def autocommit(self):
"""target dialect supports 'AUTOCOMMIT' as an isolation_level"""
return self.isolation_level + only_if(
lambda config: "AUTOCOMMIT"
in self.get_isolation_levels(config)["supported"]
)
@property
def row_triggers(self):
"""Target must support standard statement-running EACH ROW triggers."""
return skip_if(
[
# no access to same table
no_support("mysql", "requires SUPER priv"),
exclude("mysql", "<", (5, 0, 10), "not supported by database"),
# huh? TODO: implement triggers for PG tests, remove this
no_support(
"postgresql",
"PG triggers need to be implemented for tests",
),
]
)
@property
def sequences_as_server_defaults(self):
"""Target database must support SEQUENCE as a server side default."""
return only_on(
"postgresql", "doesn't support sequences as a server side default."
)
@property
def sql_expressions_inserted_as_primary_key(self):
return only_if([self.returning, self.sqlite])
@property
def computed_columns_on_update_returning(self):
return self.computed_columns + skip_if("oracle")
@property
def correlated_outer_joins(self):
"""Target must support an outer join to a subquery which
correlates to the parent."""
return skip_if(
"oracle",
'Raises "ORA-01799: a column may not be '
'outer-joined to a subquery"',
)
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return only_on(
["postgresql", "mssql", "mysql"],
"Backend does not support UPDATE..FROM",
)
@property
def delete_from(self):
"""Target must support DELETE FROM..FROM or DELETE..USING syntax"""
return only_on(
["postgresql", "mssql", "mysql", "sybase"],
"Backend does not support DELETE..FROM",
)
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE (or DELETE) where the same table is
present in a subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as::
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return fails_if(
self._mysql_not_mariadb_103,
'MySQL error 1093 "Cant specify target table '
'for update in FROM clause", resolved by MariaDB 10.3',
)
@property
def savepoints(self):
"""Target database must support savepoints."""
return skip_if(
["sqlite", "sybase", ("mysql", "<", (5, 0, 3))],
"savepoints not supported",
)
@property
def savepoints_w_release(self):
return self.savepoints + skip_if(
["oracle", "mssql"],
"database doesn't support release of savepoint",
)
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return skip_if(["firebird"], "no schema support")
@property
def cross_schema_fk_reflection(self):
"""target system must support reflection of inter-schema foreign keys
"""
return only_on(["postgresql", "mysql", "mssql"])
@property
def implicit_default_schema(self):
"""target system has a strong concept of 'default' schema that can
be referred to implicitly.
basically, PostgreSQL.
"""
return only_on(["postgresql"])
@property
def unique_constraint_reflection(self):
return fails_on_everything_except(
"postgresql", "mysql", "sqlite", "oracle"
)
@property
def unique_constraint_reflection_no_index_overlap(self):
return (
self.unique_constraint_reflection
+ skip_if("mysql")
+ skip_if("oracle")
)
@property
def check_constraint_reflection(self):
return fails_on_everything_except(
"postgresql",
"sqlite",
"oracle",
self._mysql_and_check_constraints_exist,
)
@property
def indexes_with_expressions(self):
return only_on(["postgresql", "sqlite>=3.9.0"])
@property
def temp_table_names(self):
"""target dialect supports listing of temporary table names"""
return only_on(["sqlite", "oracle"]) + skip_if(self._sqlite_file_db)
@property
def temporary_views(self):
"""target database supports temporary views"""
return only_on(["sqlite", "postgresql"]) + skip_if(
self._sqlite_file_db
)
@property
def update_nowait(self):
"""Target database must support SELECT...FOR UPDATE NOWAIT"""
return skip_if(
["firebird", "mssql", "mysql", "sqlite", "sybase"],
"no FOR UPDATE NOWAIT support",
)
@property
def subqueries(self):
"""Target database must support subqueries."""
return skip_if(exclude("mysql", "<", (4, 1, 1)), "no subquery support")
@property
def ctes(self):
"""Target database supports CTEs"""
return only_on(
[
lambda config: against(config, "mysql")
and (
config.db.dialect._is_mariadb
and config.db.dialect._mariadb_normalized_version_info
>= (10, 2)
),
"postgresql",
"mssql",
"oracle",
]
)
@property
def ctes_with_update_delete(self):
"""target database supports CTES that ride on top of a normal UPDATE
or DELETE statement which refers to the CTE in a correlated subquery.
"""
return only_on(
[
"postgresql",
"mssql",
# "oracle" - oracle can do this but SQLAlchemy doesn't support
# their syntax yet
]
)
@property
def ctes_on_dml(self):
"""target database supports CTES which consist of INSERT, UPDATE
or DELETE *within* the CTE, e.g. WITH x AS (UPDATE....)"""
return only_if(["postgresql"])
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return only_if(["mysql", "sqlite", "postgresql+psycopg2", "mssql"])
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return fails_if(
["firebird", self._mysql_not_mariadb_103, "sybase"],
"no support for INTERSECT",
)
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return fails_if(
["firebird", self._mysql_not_mariadb_103, "sybase"],
"no support for EXCEPT",
)
@property
def order_by_col_from_union(self):
"""target database supports ordering by a column from a SELECT
inside of a UNION
E.g. (SELECT id, ...) UNION (SELECT id, ...) ORDER BY id
Fails on SQL Server
"""
return fails_if("mssql")
@property
def parens_in_union_contained_select_w_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when LIMIT/OFFSET is specifically present.
E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
This is known to fail on SQLite.
"""
return fails_if("sqlite")
@property
def parens_in_union_contained_select_wo_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when OFFSET/LIMIT is specifically not present.
E.g. (SELECT ...) UNION (SELECT ..)
This is known to fail on SQLite. It also fails on Oracle
because without LIMIT/OFFSET, there is currently no step that
creates an additional subquery.
"""
return fails_if(["sqlite", "oracle"])
@property
def offset(self):
"""Target database must support some method of adding OFFSET or
equivalent to a result set."""
return fails_if(["sybase"], "no support for OFFSET or equivalent")
@property
def sql_expression_limit_offset(self):
return (
fails_if(
["mysql"],
"Target backend can't accommodate full expressions in "
"OFFSET or LIMIT",
)
+ self.offset
)
@property
def window_functions(self):
return only_if(
["postgresql>=8.4", "mssql", "oracle", "sqlite>=3.25.0"],
"Backend does not support window functions",
)
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
def pg_prepared_transaction(config):
if not against(config, "postgresql"):
return False
with config.db.connect() as conn:
try:
num = conn.scalar(
text(
"select cast(setting AS integer) from pg_settings "
"where name = 'max_prepared_transactions'"
)
)
except exc.OperationalError:
return False
else:
return num > 0
return skip_if(
[
no_support("firebird", "no SA implementation"),
no_support("mssql", "two-phase xact not supported by drivers"),
no_support(
"oracle", "two-phase xact not implemented in SQLA/oracle"
),
no_support(
"drizzle", "two-phase xact not supported by database"
),
no_support(
"sqlite", "two-phase xact not supported by database"
),
no_support(
"sybase", "two-phase xact not supported by drivers/SQLA"
),
no_support(
"mysql",
"recent MySQL communiity editions have too many issues "
"(late 2016), disabling for now",
),
NotPredicate(
LambdaPredicate(
pg_prepared_transaction,
"max_prepared_transactions not available or zero",
)
),
]
)
@property
def two_phase_recovery(self):
return self.two_phase_transactions + (
skip_if("mysql", "crashes on most mariadb and mysql versions")
)
@property
def views(self):
"""Target database must support VIEWs."""
return skip_if("drizzle", "no VIEW support")
@property
def empty_strings_varchar(self):
"""
target database can persist/return an empty string with a varchar.
"""
return fails_if(
["oracle"], "oracle converts empty strings to a blank space"
)
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return exclusions.open()
@property
def expressions_against_unbounded_text(self):
"""target database supports use of an unbounded textual field in a
WHERE clause."""
return fails_if(
["oracle"],
"ORA-00932: inconsistent datatypes: expected - got CLOB",
)
@property
def unicode_data(self):
"""target drive must support unicode data stored in columns."""
return skip_if([no_support("sybase", "no unicode driver support")])
@property
def unicode_connections(self):
"""
Target driver must support some encoding of Unicode across the wire.
"""
# TODO: expand to exclude MySQLdb versions w/ broken unicode
return skip_if(
[exclude("mysql", "<", (4, 1, 1), "no unicode connection support")]
)
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol names."""
# TODO: expand to exclude MySQLdb versions w/ broken unicode
return skip_if(
[
no_support("oracle", "FIXME: no support in database?"),
no_support("sybase", "FIXME: guessing, needs confirmation"),
no_support("mssql+pymssql", "no FreeTDS support"),
LambdaPredicate(
lambda config: against(config, "mysql+mysqlconnector")
and config.db.dialect._mysqlconnector_version_info > (2, 0)
and util.py2k,
"bug in mysqlconnector 2.0",
),
exclude(
"mysql", "<", (4, 1, 1), "no unicode connection support"
),
]
)
@property
def emulated_lastrowid(self):
""""target dialect retrieves cursor.lastrowid or an equivalent
after an insert() construct executes.
"""
return fails_on_everything_except(
"mysql", "sqlite+pysqlite", "sqlite+pysqlcipher", "sybase", "mssql"
)
@property
def implements_get_lastrowid(self):
return skip_if([no_support("sybase", "not supported by database")])
@property
def dbapi_lastrowid(self):
""""target backend includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return skip_if(
"mssql+pymssql", "crashes on pymssql"
) + fails_on_everything_except(
"mysql", "sqlite+pysqlite", "sqlite+pysqlcipher", "mssql"
)
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return fails_on_everything_except(
"postgresql", "oracle", "firebird", "sqlite >= 3.30.0"
)
@property
def reflects_pk_names(self):
"""Target driver reflects the name of primary key constraints."""
return fails_on_everything_except(
"postgresql", "oracle", "mssql", "sybase", "sqlite"
)
@property
def nested_aggregates(self):
"""target database can select an aggregate from a subquery that's
also using an aggregate"""
return skip_if(["mssql", "sqlite"])
@property
def array_type(self):
return only_on(
[
lambda config: against(config, "postgresql")
and not against(config, "+pg8000")
]
)
@property
def json_type(self):
return only_on(
[
lambda config: against(config, "mysql")
and (
(
not config.db.dialect._is_mariadb
and against(config, "mysql >= 5.7")
)
or (
config.db.dialect._mariadb_normalized_version_info
>= (10, 2, 7)
)
),
"postgresql >= 9.3",
self._sqlite_json,
]
)
@property
def json_index_supplementary_unicode_element(self):
# for sqlite see https://bugs.python.org/issue38749
return skip_if(
[
lambda config: against(config, "mysql")
and config.db.dialect._is_mariadb,
"sqlite",
]
)
def _sqlite_file_db(self, config):
return against(config, "sqlite") and config.db.dialect._is_url_file_db(
config.db.url
)
def _sqlite_memory_db(self, config):
return against(
config, "sqlite"
) and not config.db.dialect._is_url_file_db(config.db.url)
def _sqlite_json(self, config):
if not against(config, "sqlite >= 3.9"):
return False
else:
with config.db.connect() as conn:
try:
return (
conn.exec_driver_sql(
"""select json_extract('{"foo": "bar"}', """
"""'$."foo"')"""
).scalar()
== "bar"
)
except exc.DBAPIError:
return False
@property
def reflects_json_type(self):
return only_on(
[
lambda config: against(config, "mysql >= 5.7")
and not config.db.dialect._is_mariadb,
"postgresql >= 9.3",
"sqlite >= 3.9",
]
)
@property
def json_array_indexes(self):
return self.json_type + fails_if("+pg8000")
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return fails_on_everything_except("sqlite")
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return skip_if(["mssql", "mysql", "firebird", "oracle", "sybase"])
@property
def timestamp_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects but only
if TIMESTAMP is used."""
return only_on(["oracle"])
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1900) values."""
return succeeds_if(["sqlite", "postgresql", "firebird"])
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
# does not work as of pyodbc 4.0.22
return fails_on("mysql+mysqlconnector") + skip_if("mssql+pyodbc")
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1900) values."""
return succeeds_if(["sqlite", "postgresql", "firebird"])
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return skip_if(["oracle"])
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return skip_if(["mssql", "mysql", "firebird", "oracle", "sybase"])
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
# NOTE: this exclusion isn't used in current tests.
return exclusions.open()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return fails_if(
[
(
"sybase+pyodbc",
None,
None,
"Don't know how do get these values through "
"FreeTDS + Sybase",
),
("firebird", None, None, "Precision must be from 1 to 18"),
]
)
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
def broken_cx_oracle(config):
return (
against(config, "oracle+cx_oracle")
and config.db.dialect.cx_oracle_ver <= (6, 0, 2)
and config.db.dialect.cx_oracle_ver > (6,)
)
return fails_if(
[
("sqlite", None, None, "TODO"),
("firebird", None, None, "Precision must be from 1 to 18"),
("sybase+pysybase", None, None, "TODO"),
]
)
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return fails_if(
[
("oracle", None, None, "driver doesn't do this automatically"),
(
"firebird",
None,
None,
"database and/or driver truncates decimal places.",
),
]
)
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type."""
return fails_if(
[
(
"mysql",
None,
None,
"mysql FLOAT type only returns 4 decimals",
),
(
"firebird",
None,
None,
"firebird FLOAT type isn't high precision",
),
]
)
@property
def floats_to_four_decimals(self):
return fails_if(
[
("mysql+oursql", None, None, "Floating point error"),
(
"firebird",
None,
None,
"Firebird still has FP inaccuracy even "
"with only four decimal places",
),
(
"mssql+pyodbc",
None,
None,
"mssql+pyodbc has FP inaccuracy even with "
"only four decimal places ",
),
(
"mssql+pymssql",
None,
None,
"mssql+pymssql has FP inaccuracy even with "
"only four decimal places ",
),
(
"postgresql+pg8000",
None,
None,
"postgresql+pg8000 has FP inaccuracy even with "
"only four decimal places ",
),
(
"postgresql+psycopg2cffi",
None,
None,
"postgresql+psycopg2cffi has FP inaccuracy even with "
"only four decimal places ",
),
]
)
@property
def implicit_decimal_binds(self):
"""target backend will return a selected Decimal as a Decimal, not
a string.
e.g.::
expr = decimal.Decimal("15.7563")
value = e.scalar(
select([literal(expr)])
)
assert value == expr
See :ticket:`4036`
"""
# fixed for mysqlclient in
# https://github.com/PyMySQL/mysqlclient-python/commit/68b9662918577fc05be9610ef4824a00f2b051b0
def check(config):
if against(config, "mysql+mysqldb"):
# can remove once post 1.3.13 is released
try:
from MySQLdb import converters
from decimal import Decimal
return Decimal not in converters.conversions
except:
return True
return against(
config, "mysql+mysqldb"
) and config.db.dialect._mysql_dbapi_version <= (1, 3, 13)
return exclusions.fails_on(check, "fixed for mysqlclient post 1.3.13")
@property
def fetch_null_from_numeric(self):
return skip_if(("mssql+pyodbc", None, None, "crashes due to bug #351"))
@property
def duplicate_key_raises_integrity_error(self):
return fails_on("postgresql+pg8000")
def _has_pg_extension(self, name):
def check(config):
if not against(config, "postgresql"):
return False
count = (
config.db.connect(close_with_result=True)
.exec_driver_sql(
"SELECT count(*) FROM pg_extension "
"WHERE extname='%s'" % name
)
.scalar()
)
return bool(count)
return only_if(check, "needs %s extension" % name)
@property
def hstore(self):
return self._has_pg_extension("hstore")
@property
def btree_gist(self):
return self._has_pg_extension("btree_gist")
@property
def range_types(self):
def check_range_types(config):
if not against(
config, ["postgresql+psycopg2", "postgresql+psycopg2cffi"]
):
return False
try:
config.db.connect(close_with_result=True).exec_driver_sql(
"select '[1,2)'::int4range;"
).scalar()
return True
except Exception:
return False
return only_if(check_range_types)
@property
def oracle_test_dblink(self):
return skip_if(
lambda config: not config.file_config.has_option(
"sqla_testing", "oracle_db_link"
),
"oracle_db_link option not specified in config",
)
@property
def postgresql_test_dblink(self):
return skip_if(
lambda config: not config.file_config.has_option(
"sqla_testing", "postgres_test_db_link"
),
"postgres_test_db_link option not specified in config",
)
@property
def postgresql_jsonb(self):
return only_on("postgresql >= 9.4") + skip_if(
lambda config: config.db.dialect.driver == "pg8000"
and config.db.dialect._dbapi_version <= (1, 10, 1)
)
@property
def psycopg2_native_json(self):
return self.psycopg2_compatibility
@property
def psycopg2_native_hstore(self):
return self.psycopg2_compatibility
@property
def psycopg2_compatibility(self):
return only_on(["postgresql+psycopg2", "postgresql+psycopg2cffi"])
@property
def psycopg2_or_pg8000_compatibility(self):
return only_on(
[
"postgresql+psycopg2",
"postgresql+psycopg2cffi",
"postgresql+pg8000",
]
)
@property
def percent_schema_names(self):
return skip_if(
[
(
"+psycopg2",
None,
None,
"psycopg2 2.4 no longer accepts percent "
"sign in bind placeholders",
),
(
"+psycopg2cffi",
None,
None,
"psycopg2cffi does not accept percent signs in "
"bind placeholders",
),
("mysql", None, None, "executemany() doesn't work here"),
]
)
@property
def order_by_label_with_expression(self):
return fails_if(
[
(
"firebird",
None,
None,
"kinterbasdb doesn't send full type information",
),
("postgresql", None, None, "only simple labels allowed"),
("sybase", None, None, "only simple labels allowed"),
("mssql", None, None, "only simple labels allowed"),
]
)
def get_order_by_collation(self, config):
lookup = {
# will raise without quoting
"postgresql": "POSIX",
# note MySQL databases need to be created w/ utf8mb4 charset
# for the test suite
"mysql": "utf8mb4_bin",
"sqlite": "NOCASE",
# will raise *with* quoting
"mssql": "Latin1_General_CI_AS",
}
try:
return lookup[config.db.name]
except KeyError:
raise NotImplementedError()
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return skip_if(
self._has_mysql_on_windows, "Not supported on MySQL + Windows"
)
@property
def mssql_freetds(self):
return only_on(["mssql+pymssql"])
@property
def ad_hoc_engines(self):
return exclusions.skip_if(
["oracle"],
"works, but Oracle just gets tired with "
"this much connection activity",
)
@property
def no_mssql_freetds(self):
return self.mssql_freetds.not_()
@property
def pyodbc_fast_executemany(self):
def has_fastexecutemany(config):
if not against(config, "mssql+pyodbc"):
return False
if config.db.dialect._dbapi_version() < (4, 0, 19):
return False
with config.db.connect() as conn:
drivername = conn.connection.connection.getinfo(
config.db.dialect.dbapi.SQL_DRIVER_NAME
)
# on linux this is something like 'libmsodbcsql-13.1.so.9.2'.
# on Windows this is something like 'msodbcsql17.dll'.
return "msodbc" in drivername
return only_if(
has_fastexecutemany, "only on pyodbc > 4.0.19 w/ msodbc driver"
)
@property
def python_fixed_issue_8743(self):
return exclusions.skip_if(
lambda: sys.version_info < (2, 7, 8),
"Python issue 8743 fixed in Python 2.7.8",
)
@property
def granular_timezone(self):
"""the datetime.timezone class, or SQLAlchemy's port, supports
seconds and microseconds.
SQLAlchemy ported the Python 3.7 version for Python 2, so
it passes on that. For Python 3.6 and earlier, it is not supported.
"""
return exclusions.skip_if(
lambda: sys.version_info >= (3,) and sys.version_info < (3, 7)
)
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return skip_if(
["oracle", "firebird"], "non-standard SELECT scalar syntax"
)
@property
def mysql_for_update(self):
return skip_if(
"mysql+mysqlconnector",
"lock-sensitive operations crash on mysqlconnector",
)
@property
def mysql_fsp(self):
return only_if("mysql >= 5.6.4")
@property
def mysql_fully_case_sensitive(self):
return only_if(self._has_mysql_fully_case_sensitive)
@property
def mysql_zero_date(self):
def check(config):
if not against(config, "mysql"):
return False
row = (
config.db.connect(close_with_result=True)
.exec_driver_sql("show variables like 'sql_mode'")
.first()
)
return not row or "NO_ZERO_DATE" not in row[1]
return only_if(check)
@property
def mysql_non_strict(self):
def check(config):
if not against(config, "mysql"):
return False
row = (
config.db.connect(close_with_result=True)
.exec_driver_sql("show variables like 'sql_mode'")
.first()
)
return not row or "STRICT_TRANS_TABLES" not in row[1]
return only_if(check)
@property
def mysql_ngram_fulltext(self):
def check(config):
return (
against(config, "mysql")
and not config.db.dialect._is_mariadb
and config.db.dialect.server_version_info >= (5, 7)
)
return only_if(check)
def _mysql_80(self, config):
return (
against(config, "mysql")
and config.db.dialect._is_mysql
and config.db.dialect.server_version_info >= (8,)
)
def _mariadb_102(self, config):
return (
against(config, "mysql")
and config.db.dialect._is_mariadb
and config.db.dialect._mariadb_normalized_version_info > (10, 2)
)
def _mysql_and_check_constraints_exist(self, config):
# 1. we have mysql / mariadb and
# 2. it enforces check constraints
if exclusions.against(config, "mysql"):
if config.db.dialect._is_mariadb:
norm_version_info = (
config.db.dialect._mariadb_normalized_version_info
)
return norm_version_info >= (10, 2)
else:
norm_version_info = config.db.dialect.server_version_info
return norm_version_info >= (8, 0, 16)
else:
return False
def _mysql_check_constraints_exist(self, config):
# 1. we dont have mysql / mariadb or
# 2. we have mysql / mariadb that enforces check constraints
return not exclusions.against(
config, "mysql"
) or self._mysql_and_check_constraints_exist(config)
def _mysql_check_constraints_dont_exist(self, config):
# 1. we have mysql / mariadb and
# 2. they dont enforce check constraints
return not self._mysql_check_constraints_exist(config)
def _mysql_not_mariadb_102(self, config):
return against(config, "mysql") and (
not config.db.dialect._is_mariadb
or config.db.dialect._mariadb_normalized_version_info < (10, 2)
)
def _mysql_not_mariadb_103(self, config):
return against(config, "mysql") and (
not config.db.dialect._is_mariadb
or config.db.dialect._mariadb_normalized_version_info < (10, 3)
)
def _mysql_not_mariadb_104(self, config):
return against(config, "mysql") and (
not config.db.dialect._is_mariadb
or config.db.dialect._mariadb_normalized_version_info < (10, 4)
)
def _has_mysql_on_windows(self, config):
return (
against(config, "mysql")
and config.db.dialect._detect_casing(config.db) == 1
)
def _has_mysql_fully_case_sensitive(self, config):
return (
against(config, "mysql")
and config.db.dialect._detect_casing(config.db) == 0
)
@property
def postgresql_utf8_server_encoding(self):
return only_if(
lambda config: against(config, "postgresql")
and config.db.connect(close_with_result=True)
.exec_driver_sql("show server_encoding")
.scalar()
.lower()
== "utf8"
)
@property
def cxoracle6_or_greater(self):
return only_if(
lambda config: against(config, "oracle+cx_oracle")
and config.db.dialect.cx_oracle_ver >= (6,)
)
@property
def oracle5x(self):
return only_if(
lambda config: against(config, "oracle+cx_oracle")
and config.db.dialect.cx_oracle_ver < (6,)
)
@property
def computed_columns(self):
return skip_if(["postgresql < 12", "sqlite", "mysql < 5.7"])
@property
def python_profiling_backend(self):
return only_on([self._sqlite_memory_db])
@property
def computed_columns_stored(self):
return self.computed_columns + skip_if(["oracle", "firebird"])
@property
def computed_columns_virtual(self):
return self.computed_columns + skip_if(["postgresql", "firebird"])
@property
def computed_columns_default_persisted(self):
return self.computed_columns + only_if("postgresql")
@property
def computed_columns_reflect_persisted(self):
return self.computed_columns + skip_if("oracle")
|
{
"content_hash": "fc04d7c21ea2dee81bb66ac9f6896104",
"timestamp": "",
"source": "github",
"line_count": 1609,
"max_line_length": 103,
"avg_line_length": 31.09633312616532,
"alnum_prop": 0.5455690130711116,
"repo_name": "graingert/sqlalchemy",
"id": "669e0b7eb178743b76b7055deb7152260c27504a",
"size": "50034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/requirements.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "49149"
},
{
"name": "Python",
"bytes": "11845913"
}
],
"symlink_target": ""
}
|
import sys
import os
from argparse import ArgumentParser
from Bio import SeqIO
try:
from string import maketrans
except ImportError:
maketrans = str.maketrans
def lesserkmer(s):
'''returns the lesser of a kmer and its reverse complement'''
t = revc(s)
if t < s:
return t
else:
return s
def revc(s):
'''returns reverse complement of a sequence'''
intab = "AaCcGgTt"
outtab = "TtGgCcAa"
trantab = maketrans(intab, outtab)
t = s.translate(trantab)[::-1]
return t
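# Illustrative sketch (values assumed): revc('AAGT') == 'ACTT', so
# lesserkmer('AAGT') returns 'AAGT', the lexicographically smaller of the pair,
# giving a canonical form shared by a k-mer and its reverse complement.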
def gccontent(sq):
'''returns float gc content of sequence'''
length = len(sq)
gc_count = 0
for ch in sq:
if ch in ['G', 'C', 'g', 'c']:
gc_count += 1
try:
r = float(gc_count) / length
except ZeroDivisionError:
r = 0
return r
def read_index(filename):
gian = {}
sys.stderr.write("Processing table %s ...\n" % (filename,))
in_idx = open(filename)
for l in in_idx:
if l[0] != "#":
s = l.rstrip().split()
gian[s[0]] = int(s[1])
return gian
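# Illustrative sketch (format assumed from the parsing above): the index table is a
# whitespace-delimited text file of "<kmer> <count>" rows, with '#' comment lines skipped, e.g.
#   # kmer  count
#   AAGT    17
#   ACTT    4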
def kmerabundance(seq, index):
'''looks up kmer abundance of each kmer in sequence, returns summary statistics'''
a = []
for i in range(0, len(seq) - k):
word = seq[i:i + k]
w = lesserkmer(word)
try:
a.append(index[w])
except KeyError:
a.append(0)
a.sort()
try:
median = a[len(a) // 2]
except IndexError:
median = 0
try:
minimum = a[0]
except IndexError:
minimum = 0
try:
maximum = a[-1]
except IndexError:
maximum = 0
try:
average = float(sum(a)) / len(a)
except IndexError:
average = 0
except ZeroDivisionError:
average = 0
return (minimum, median, maximum, average)
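# Illustrative usage sketch (values assumed; note the global k is set in __main__ from
# the index before this is called):
#   index = {'AAGT': 17, 'ACTT': 4}
#   minimum, median, maximum, average = kmerabundance('AAGTC', index)
# Each k-mer in the read is canonicalised with lesserkmer() and looked up; missing
# k-mers count as 0.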
if __name__ == '__main__':
usage = "usage: %prog -1 <file1> [-2 <file2>] -i <index> [-o <outstem>] -l <cutoff>\n Note: generates outstem.hi.fastq and outstem.lo.fastq"
parser = ArgumentParser(usage)
parser.add_argument("-1", "--one", dest="one",
required=True, help="Input file 1")
parser.add_argument("-2", "--two", dest="two",
default=None, help="Input file 2 (interleaved if absent)")
parser.add_argument("-i", "--index", dest="index",
required=True, help="input index ")
parser.add_argument("-t", "--type", dest="typ",
default="fastq", help="input datatype (fastq, fasta)")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
default=True, help="Verbose [default off]")
parser.add_argument("-l", "--cutoff", dest="cutoff",
default=None, help="median min coverage cutoff")
parser.add_argument("-o", "--outstem", dest="outstem",
default=None, help="output file stem")
args = parser.parse_args()
typ = args.typ
if not typ:
typ = "fasta" if args.one[-1] in ("a", "A") else "fastq"
if not args.one:
parser.error("Missing input filename")
if not os.path.isfile(args.one):
parser.error("Missing input file %s" % args.one)
if args.two and os.path.isfile(args.two):
in_two = open(args.two)
if not args.cutoff:
sys.stderr.write("Warning: missing cutoff parameter -l\n")
args.cutoff = 0
if args.outstem is None:
args.outstem = args.one
in_one = open(args.one)
in_idx = open(args.index)
if args.verbose:
sys.stderr.write(
"Processing sequences %s and table %s ...\n" % (args.one, args.index))
sys.stderr.write("Opening output files %s.hi.%s and %s.lo.%s\n" %
(args.outstem, typ, args.outstem, typ))
out_high = open("%s.hi.%s" % (args.outstem, typ), "w")
out_low1 = open("%s.lo.%s" % (args.outstem, typ), "w")
giant = {}
sys.stderr.write("Reading index...\n")
indexlist = args.index.split(",")
indexes = []
for i in range(len(indexlist)):
giant = read_index(indexlist[i])
indexes.append(giant)
k = len(next(iter(indexes[0])))
sys.stderr.write("Done slurping... set k = %d\n" % (k,))
# Setup paired-read input
sys.stderr.write("Looping data: \n")
records1 = SeqIO.parse(in_one, typ)
if args.two:
records2 = SeqIO.parse(in_two, typ)
else:
records2 = records1
n = 0
for seq_record1 in records1:
n += 1
seq_record2 = next(records2)
seq1 = str(seq_record1.seq)
seq2 = str(seq_record2.seq)
if seq1.find("N") == -1 and seq2.find("N") == -1:
(min1, med1, max1, avg1) = kmerabundance(seq1, indexes[0])
(min2, med2, max2, avg2) = kmerabundance(seq2, indexes[0])
seq_record1.description = "%s\tmed%dmer=%d\tmax%dmer=%d\tmin%dmer=%d" % (
seq_record1.description, k, med1, k, max1, k, min1)
seq_record2.description = "%s\tmed%dmer=%d\tmax%dmer=%d\tmin%dmer=%d" % (
seq_record2.description, k, med2, k, max2, k, min2)
if med1 > float(args.cutoff) and med2 > float(args.cutoff):
SeqIO.write([seq_record1, seq_record2], out_high, typ)
else:
SeqIO.write([seq_record1, seq_record2], out_low1, typ)
out_low1.close()
out_high.close()
if args.verbose:
sys.stderr.write("Done. \n")
|
{
"content_hash": "109ac977d17d94f5001c33787f570b9b",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 146,
"avg_line_length": 33.018181818181816,
"alnum_prop": 0.5552496328928047,
"repo_name": "wltrimbl/kmerspectrumanalyzer",
"id": "a6d40c924f35cb91ea1e6bd91d897fa885ce2364",
"size": "5471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ksatools/fqfilter.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "2838"
},
{
"name": "Perl",
"bytes": "4666"
},
{
"name": "Python",
"bytes": "126361"
},
{
"name": "Roff",
"bytes": "14642049"
},
{
"name": "Shell",
"bytes": "17486"
}
],
"symlink_target": ""
}
|
import functools
import unittest
from test import support
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test
class Callbacks(unittest.TestCase):
functype = CFUNCTYPE
## def tearDown(self):
## import gc
## gc.collect()
def callback(self, *args):
self.got_args = args
return args[-1]
def check_type(self, typ, arg):
PROTO = self.functype.__func__(typ, typ)
result = PROTO(self.callback)(arg)
if typ == c_float:
self.assertAlmostEqual(result, arg, places=5)
else:
self.assertEqual(self.got_args, (arg,))
self.assertEqual(result, arg)
PROTO = self.functype.__func__(typ, c_byte, typ)
result = PROTO(self.callback)(-3, arg)
if typ == c_float:
self.assertAlmostEqual(result, arg, places=5)
else:
self.assertEqual(self.got_args, (-3, arg))
self.assertEqual(result, arg)
################
def test_byte(self):
self.check_type(c_byte, 42)
self.check_type(c_byte, -42)
def test_ubyte(self):
self.check_type(c_ubyte, 42)
def test_short(self):
self.check_type(c_short, 42)
self.check_type(c_short, -42)
def test_ushort(self):
self.check_type(c_ushort, 42)
def test_int(self):
self.check_type(c_int, 42)
self.check_type(c_int, -42)
def test_uint(self):
self.check_type(c_uint, 42)
def test_long(self):
self.check_type(c_long, 42)
self.check_type(c_long, -42)
def test_ulong(self):
self.check_type(c_ulong, 42)
def test_longlong(self):
self.check_type(c_longlong, 42)
self.check_type(c_longlong, -42)
def test_ulonglong(self):
self.check_type(c_ulonglong, 42)
def test_float(self):
# only almost equal: double -> float -> double
import math
self.check_type(c_float, math.e)
self.check_type(c_float, -math.e)
def test_double(self):
self.check_type(c_double, 3.14)
self.check_type(c_double, -3.14)
def test_longdouble(self):
self.check_type(c_longdouble, 3.14)
self.check_type(c_longdouble, -3.14)
def test_char(self):
self.check_type(c_char, b"x")
self.check_type(c_char, b"a")
# disabled: would now (correctly) raise a RuntimeWarning about
# a memory leak. A callback function cannot return a non-integral
# C type without causing a memory leak.
@unittest.skip('test disabled')
def test_char_p(self):
self.check_type(c_char_p, "abc")
self.check_type(c_char_p, "def")
@support.refcount_test
def test_pyobject(self):
o = ()
from sys import getrefcount as grc
for o in (), [], object():
initial = grc(o)
# This call leaks a reference to 'o'...
self.check_type(py_object, o)
before = grc(o)
# ...but this call doesn't leak any more. Where is the refcount?
self.check_type(py_object, o)
after = grc(o)
self.assertEqual((after, o), (before, o))
def test_unsupported_restype_1(self):
# Only "fundamental" result types are supported for callback
# functions, the type must have a non-NULL stgdict->setfunc.
# POINTER(c_double), for example, is not supported.
prototype = self.functype.__func__(POINTER(c_double))
# The type is checked when the prototype is called
self.assertRaises(TypeError, prototype, lambda: None)
def test_unsupported_restype_2(self):
prototype = self.functype.__func__(object)
self.assertRaises(TypeError, prototype, lambda: None)
@support.cpython_only
def test_issue_7959(self):
proto = self.functype.__func__(None)
class X(object):
def func(self): pass
def __init__(self):
self.v = proto(self.func)
import gc
for i in range(32):
X()
gc.collect()
live = [x for x in gc.get_objects()
if isinstance(x, X)]
self.assertEqual(len(live), 0)
def test_issue12483(self):
import gc
class Nasty:
def __del__(self):
gc.collect()
CFUNCTYPE(None)(lambda x=Nasty(): None)
@need_symbol('WINFUNCTYPE')
class StdcallCallbacks(Callbacks):
try:
functype = WINFUNCTYPE
except NameError:
pass
################################################################
class SampleCallbacksTestCase(unittest.TestCase):
def test_integrate(self):
# Derived from some then non-working code, posted by David Foster
dll = CDLL(_ctypes_test.__file__)
# The function prototype called by 'integrate': double func(double);
CALLBACK = CFUNCTYPE(c_double, c_double)
# The integrate function itself, exposed from the _ctypes_test dll
integrate = dll.integrate
integrate.argtypes = (c_double, c_double, CALLBACK, c_long)
integrate.restype = c_double
def func(x):
return x**2
result = integrate(0.0, 1.0, CALLBACK(func), 10)
diff = abs(result - 1./3.)
self.assertLess(diff, 0.01, "%s not less than 0.01" % diff)
def test_issue_8959_a(self):
from ctypes.util import find_library
libc_path = find_library("c")
if not libc_path:
self.skipTest('could not find libc')
libc = CDLL(libc_path)
@CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int))
def cmp_func(a, b):
return a[0] - b[0]
array = (c_int * 5)(5, 1, 99, 7, 33)
libc.qsort(array, len(array), sizeof(c_int), cmp_func)
self.assertEqual(array[:], [1, 5, 7, 33, 99])
@need_symbol('WINFUNCTYPE')
def test_issue_8959_b(self):
from ctypes.wintypes import BOOL, HWND, LPARAM
global windowCount
windowCount = 0
@WINFUNCTYPE(BOOL, HWND, LPARAM)
def EnumWindowsCallbackFunc(hwnd, lParam):
global windowCount
windowCount += 1
return True #Allow windows to keep enumerating
windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0)
def test_callback_register_int(self):
# Issue #8275: buggy handling of callback args under Win64
# NOTE: should be run on release builds as well
dll = CDLL(_ctypes_test.__file__)
CALLBACK = CFUNCTYPE(c_int, c_int, c_int, c_int, c_int, c_int)
# All this function does is call the callback with its args squared
func = dll._testfunc_cbk_reg_int
func.argtypes = (c_int, c_int, c_int, c_int, c_int, CALLBACK)
func.restype = c_int
def callback(a, b, c, d, e):
return a + b + c + d + e
result = func(2, 3, 4, 5, 6, CALLBACK(callback))
self.assertEqual(result, callback(2*2, 3*3, 4*4, 5*5, 6*6))
def test_callback_register_double(self):
# Issue #8275: buggy handling of callback args under Win64
# NOTE: should be run on release builds as well
dll = CDLL(_ctypes_test.__file__)
CALLBACK = CFUNCTYPE(c_double, c_double, c_double, c_double,
c_double, c_double)
# All this function does is call the callback with its args squared
func = dll._testfunc_cbk_reg_double
func.argtypes = (c_double, c_double, c_double,
c_double, c_double, CALLBACK)
func.restype = c_double
def callback(a, b, c, d, e):
return a + b + c + d + e
result = func(1.1, 2.2, 3.3, 4.4, 5.5, CALLBACK(callback))
self.assertEqual(result,
callback(1.1*1.1, 2.2*2.2, 3.3*3.3, 4.4*4.4, 5.5*5.5))
def test_callback_large_struct(self):
class Check: pass
class X(Structure):
_fields_ = [
('first', c_ulong),
('second', c_ulong),
('third', c_ulong),
]
def callback(check, s):
check.first = s.first
check.second = s.second
check.third = s.third
check = Check()
s = X()
s.first = 0xdeadbeef
s.second = 0xcafebabe
s.third = 0x0bad1dea
CALLBACK = CFUNCTYPE(None, X)
dll = CDLL(_ctypes_test.__file__)
func = dll._testfunc_cbk_large_struct
func.argtypes = (X, CALLBACK)
func.restype = None
# the function just calls the callback with the passed structure
func(s, CALLBACK(functools.partial(callback, check)))
self.assertEqual(check.first, s.first)
self.assertEqual(check.second, s.second)
self.assertEqual(check.third, s.third)
self.assertEqual(check.first, 0xdeadbeef)
self.assertEqual(check.second, 0xcafebabe)
self.assertEqual(check.third, 0x0bad1dea)
################################################################
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "545c1e406ec9066c71008e87714fdda0",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 79,
"avg_line_length": 31.84561403508772,
"alnum_prop": 0.5672102247686205,
"repo_name": "IronLanguages/ironpython3",
"id": "125fdb7dd8c07107a78635d663bdfb02fba0df6c",
"size": "9076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Src/StdLib/Lib/ctypes/test/test_callbacks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6855"
},
{
"name": "C",
"bytes": "239473"
},
{
"name": "C#",
"bytes": "12619304"
},
{
"name": "C++",
"bytes": "28403"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "13157428"
},
{
"name": "Makefile",
"bytes": "332"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "84504"
},
{
"name": "Python",
"bytes": "29490541"
},
{
"name": "Roff",
"bytes": "21080"
},
{
"name": "Shell",
"bytes": "4872"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
}
|
"""
Support for functionality to download files.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/downloader/
"""
import logging
import os
import re
import threading
import requests
import voluptuous as vol
from homeassistant.helpers import validate_config
import homeassistant.helpers.config_validation as cv
from homeassistant.util import sanitize_filename
DOMAIN = "downloader"
SERVICE_DOWNLOAD_FILE = "download_file"
ATTR_URL = "url"
ATTR_SUBDIR = "subdir"
SERVICE_DOWNLOAD_FILE_SCHEMA = vol.Schema({
# pylint: disable=no-value-for-parameter
vol.Required(ATTR_URL): vol.Url(),
vol.Optional(ATTR_SUBDIR): cv.string,
})
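# Illustrative service payload: {"url": "http://example.com/file.zip",
# "subdir": "archive"} would store the file under <download_dir>/archive/.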
CONF_DOWNLOAD_DIR = 'download_dir'
# pylint: disable=too-many-branches
def setup(hass, config):
"""Listen for download events to download files."""
logger = logging.getLogger(__name__)
if not validate_config(config, {DOMAIN: [CONF_DOWNLOAD_DIR]}, logger):
return False
download_path = config[DOMAIN][CONF_DOWNLOAD_DIR]
# If path is relative, we assume relative to HASS config dir
if not os.path.isabs(download_path):
download_path = hass.config.path(download_path)
if not os.path.isdir(download_path):
logger.error(
"Download path %s does not exist. File Downloader not active.",
download_path)
return False
def download_file(service):
"""Start thread to download file specified in the URL."""
def do_download():
"""Download the file."""
try:
url = service.data[ATTR_URL]
subdir = service.data.get(ATTR_SUBDIR)
if subdir:
subdir = sanitize_filename(subdir)
final_path = None
req = requests.get(url, stream=True, timeout=10)
if req.status_code == 200:
filename = None
if 'content-disposition' in req.headers:
match = re.findall(r"filename=(\S+)",
req.headers['content-disposition'])
if len(match) > 0:
filename = match[0].strip("'\" ")
if not filename:
filename = os.path.basename(
url).strip()
if not filename:
filename = "ha_download"
# Strip characters that could be used to break out of the download path
filename = sanitize_filename(filename)
# Do we want to download to subdir, create if needed
if subdir:
subdir_path = os.path.join(download_path, subdir)
# Ensure the subdir exists
if not os.path.isdir(subdir_path):
os.makedirs(subdir_path)
final_path = os.path.join(subdir_path, filename)
else:
final_path = os.path.join(download_path, filename)
path, ext = os.path.splitext(final_path)
# If the file exists, append a number.
# We try filename, filename_2, ...
tries = 1
final_path = path + ext
while os.path.isfile(final_path):
tries += 1
final_path = "{}_{}.{}".format(path, tries, ext)
logger.info("%s -> %s", url, final_path)
with open(final_path, 'wb') as fil:
for chunk in req.iter_content(1024):
fil.write(chunk)
logger.info("Downloading of %s done", url)
except requests.exceptions.ConnectionError:
logger.exception("ConnectionError occured for %s", url)
# Remove file if we started downloading but failed
if final_path and os.path.isfile(final_path):
os.remove(final_path)
threading.Thread(target=do_download).start()
hass.services.register(DOMAIN, SERVICE_DOWNLOAD_FILE, download_file,
schema=SERVICE_DOWNLOAD_FILE_SCHEMA)
return True
|
{
"content_hash": "34412c98680b9e43ad19e42c3fef2dbc",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 78,
"avg_line_length": 31.437956204379564,
"alnum_prop": 0.540051079637799,
"repo_name": "devdelay/home-assistant",
"id": "c639619d7a70fc9386e7392e01b8920afb8a1785",
"size": "4307",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/downloader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1367764"
},
{
"name": "Python",
"bytes": "2777590"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6430"
}
],
"symlink_target": ""
}
|
"""Test create_pipeline functionality"""
from unittest import mock
import pytest
from foremast.pipeline import SpinnakerPipeline
TEST_FORMAT_GENERATOR = mock.Mock()
TEST_SETTINGS = {
'dev': {
'regions': ['us-east-1'],
'us-east-1': {
'app': {
'app_description': 'Test App Demo application'
},
'deploy_strategy': 'highlander',
'regions': ['us-east-1'],
}
},
'pipeline': {
'base': 'tomcat8',
'config_commit': '',
'deployment': 'spinnaker',
'documentation': '',
'env': ['dev'],
'eureka': True,
'image': {
'builder': 'ebs',
'root_volume_size': 6
},
'regions': ['us-east-1', 'us-west-2'],
'type': 'ec2'
}
}
@pytest.fixture
@mock.patch('foremast.pipeline.create_pipeline.get_properties')
@mock.patch('foremast.pipeline.create_pipeline.get_details')
@mock.patch('foremast.pipeline.create_pipeline.os')
def spinnaker_pipeline(mock_os, mock_get_details, mock_get_prop):
"""Sets up pipeline fixture object"""
mock_get_prop.return_value = TEST_SETTINGS
pipelineObj = SpinnakerPipeline(
app='appgroup',
trigger_job='a_group_app', )
pipelineObj.generated = TEST_FORMAT_GENERATOR
pipelineObj.app_name = 'appgroup'
pipelineObj.group_name = 'group'
return pipelineObj
@mock.patch('foremast.pipeline.create_pipeline.clean_pipelines')
@mock.patch.object(SpinnakerPipeline, 'render_wrapper')
@mock.patch('foremast.pipeline.create_pipeline.get_subnets')
@mock.patch('foremast.pipeline.create_pipeline.construct_pipeline_block')
@mock.patch('foremast.pipeline.create_pipeline.renumerate_stages')
@mock.patch.object(SpinnakerPipeline, 'post_pipeline')
def test_create_pipeline_ec2(mock_post, mock_renumerate, mock_construct, mock_subnets, mock_wrapper, mock_clean,
spinnaker_pipeline):
"""test pipeline creation if ec2 pipeline."""
test_block_data = {
"env": "dev",
"generated": TEST_FORMAT_GENERATOR,
"previous_env": None,
"region": "us-east-1",
"settings": spinnaker_pipeline.settings["dev"]["us-east-1"],
"pipeline_data": spinnaker_pipeline.settings['pipeline'],
"region_subnets": {
'us-east-1': ['us-east-1d', 'us-east-1a', 'us-east-1e']
}
}
mock_subnets.return_value = {'dev': {'us-east-1': ['us-east-1d', 'us-east-1a', 'us-east-1e']}}
mock_construct.return_value = '{"test": "stuff"}'
mock_wrapper.return_value = {'stages': []}
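# Run pipeline creation and verify the block is constructed with the expected
# arguments and the rendered pipeline is posted via post_pipeline.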
created = spinnaker_pipeline.create_pipeline()
mock_construct.assert_called_with(**test_block_data)
mock_post.assert_called_with({'stages': ['test']})
assert created is True
|
{
"content_hash": "b9676b75abb317f428fdb2ca8c818561",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 112,
"avg_line_length": 34.875,
"alnum_prop": 0.6161290322580645,
"repo_name": "gogoair/foremast",
"id": "9a1ab53566beecfb6997373bbe142d534672c941",
"size": "3417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/pipeline/test_create_pipeline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7614"
},
{
"name": "Python",
"bytes": "484364"
},
{
"name": "Shell",
"bytes": "180"
}
],
"symlink_target": ""
}
|
'''
ubmodule-msg.py: simple response packet logger
Authors: Zdenek Vasicek (vasicek AT fit.vutbr.cz)
Marek Vavrusa (xvavru00 AT stud.fit.vutbr.cz)
Copyright (c) 2008. All rights reserved.
This software is open source.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Modified for unit test by Wouter Wijngaards, NLnet Labs, 2009.
'''
import os
def init(id, cfg):
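# Collect the configured python-script entries (a linked list on the config
# object) purely so they can be logged below.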
scripts=[]
s = cfg.python_script
while s != None:
scripts.append(s.str)
s = s.next
log_info("pythonmod: init called, module id is %d port: %d script: %s" % (id, cfg.port, scripts))
return True
def deinit(id):
log_info("pythonmod: deinit called, module id is %d" % id)
return True
def inform_super(id, qstate, superqstate, qdata):
return True
def setTTL(qstate, ttl):
"""Sets return_msg TTL and all the RRs TTL"""
if qstate.return_msg:
qstate.return_msg.rep.ttl = ttl
if (qstate.return_msg.rep):
for i in range(0,qstate.return_msg.rep.rrset_count):
d = qstate.return_msg.rep.rrsets[i].entry.data
for j in range(0,d.count+d.rrsig_count):
d.rr_ttl[j] = ttl
def dataHex(data, prefix=""):
res = ""
for i in range(0, int((len(data)+15)/16)):
res += "%s0x%02X | " % (prefix, i*16)
if isinstance(data[0], int):
d = [int(x) for x in data[i*16:i*16+17]]
else:
d = [ord(x) for x in data[i*16:i*16+17]]
for ch in d:
res += "%02X " % ch
for i in range(0,17-len(data[i*16:i*16+17])):
res += " "
res += "| "
for ch in d:
if (ch < 32) or (ch > 127):
res += ". "
else:
res += "%c " % ch
res += "\n"
return res
def printReturnMsg(qstate):
print ("Return MSG rep :: flags: %04X, QDcount: %d, Security:%d, TTL=%d" % (qstate.return_msg.rep.flags, qstate.return_msg.rep.qdcount, qstate.return_msg.rep.security, qstate.return_msg.rep.ttl))
print (" qinfo :: qname:",qstate.return_msg.qinfo.qname_list, qstate.return_msg.qinfo.qname_str, "type:",qstate.return_msg.qinfo.qtype_str, "class:",qstate.return_msg.qinfo.qclass_str)
if (qstate.return_msg.rep):
print ("RRSets:",qstate.return_msg.rep.rrset_count)
prevkey = None
for i in range(0,qstate.return_msg.rep.rrset_count):
r = qstate.return_msg.rep.rrsets[i]
rk = r.rk
print (i,":",rk.dname_list, rk.dname_str, "flags: %04X" % rk.flags)
print ("type:",rk.type_str,"(%d)" % ntohs(rk.type), "class:",rk.rrset_class_str,"(%d)" % ntohs(rk.rrset_class))
d = r.entry.data
print (" RRDatas:",d.count+d.rrsig_count)
for j in range(0,d.count+d.rrsig_count):
print (" ",j,":","TTL=",d.rr_ttl[j],"RR data:")
print (dataHex(d.rr_data[j]," "))
def operate(id, event, qstate, qdata):
log_info("pythonmod: operate called, id: %d, event:%s" % (id, strmodulevent(event)))
#print ("pythonmod: per query data", qdata)
print ("Query:", qstate.qinfo.qname, qstate.qinfo.qname_list, qstate.qinfo.qname_str)
print ("Type:",qstate.qinfo.qtype_str,"(%d)" % qstate.qinfo.qtype)
print ("Class:",qstate.qinfo.qclass_str,"(%d)" % qstate.qinfo.qclass)
print ()
if (event == MODULE_EVENT_NEW or event == MODULE_EVENT_PASS) and (qstate.qinfo.qname_str.endswith("www2.example.com.")):
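# Synthesize an authoritative answer for www2.example.com locally and finish
# the module, bypassing the cache and upstream resolution.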
print (qstate.qinfo.qname_str)
qstate.ext_state[id] = MODULE_FINISHED
msg = DNSMessage(qstate.qinfo.qname_str, RR_TYPE_A, RR_CLASS_IN, PKT_QR | PKT_RA | PKT_AA) #, 300)
#msg.authority.append("xxx.seznam.cz. 10 IN A 192.168.1.1")
#msg.additional.append("yyy.seznam.cz. 10 IN A 1.1.1.2.")
# answer can be returned to the client without further checking.
if qstate.qinfo.qtype == RR_TYPE_A:
msg.answer.append("%s 10 IN A 192.168.1.1" % qstate.qinfo.qname_str)
if (qstate.qinfo.qtype == RR_TYPE_SRV) or (qstate.qinfo.qtype == RR_TYPE_ANY):
msg.answer.append("%s 10 IN SRV 0 0 80 neinfo.example.com." % qstate.qinfo.qname_str)
if (qstate.qinfo.qtype == RR_TYPE_TXT) or (qstate.qinfo.qtype == RR_TYPE_ANY):
msg.answer.append("%s 10 IN TXT path=/" % qstate.qinfo.qname_str)
print(msg.answer)
if not msg.set_return_msg(qstate):
qstate.ext_state[id] = MODULE_ERROR
return True
#qstate.return_msg.rep.security = 2  # if no validator follows, security must be set here so the packet is not dropped in mesh_send_reply
printReturnMsg(qstate)
#Authoritative result can't be stored in cache
#if (not storeQueryInCache(qstate, qstate.return_msg.qinfo, qstate.return_msg.rep, 0)):
# print "Can't store in cache"
# qstate.ext_state[id] = MODULE_ERROR
# return False
#print "Store OK"
qstate.return_rcode = RCODE_NOERROR
return True
if event == MODULE_EVENT_NEW:
qstate.ext_state[id] = MODULE_WAIT_MODULE
return True
if event == MODULE_EVENT_MODDONE:
log_info("pythonmod: previous module done")
qstate.ext_state[id] = MODULE_FINISHED
return True
if event == MODULE_EVENT_PASS:
log_info("pythonmod: event_pass")
qstate.ext_state[id] = MODULE_WAIT_MODULE
return True
log_err("pythonmod: BAD event")
qstate.ext_state[id] = MODULE_ERROR
return True
log_info("pythonmod: script loaded.")
|
{
"content_hash": "6524e219def61f24bb21943e761372a3",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 201,
"avg_line_length": 40.808383233532936,
"alnum_prop": 0.6290535583272193,
"repo_name": "NLnetLabs/unbound",
"id": "1eb7af5b16e5a3b2d993bb48ec9c048a535a8571",
"size": "6839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testdata/pymod.tdir/pymod.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "10453"
},
{
"name": "Batchfile",
"bytes": "12286"
},
{
"name": "C",
"bytes": "5709398"
},
{
"name": "Lex",
"bytes": "29348"
},
{
"name": "M4",
"bytes": "147130"
},
{
"name": "Makefile",
"bytes": "124782"
},
{
"name": "NSIS",
"bytes": "8039"
},
{
"name": "Perl",
"bytes": "7937"
},
{
"name": "Python",
"bytes": "112498"
},
{
"name": "Roff",
"bytes": "1928"
},
{
"name": "SWIG",
"bytes": "96451"
},
{
"name": "Shell",
"bytes": "437159"
},
{
"name": "Yacc",
"bytes": "116072"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import datetime
import os
from decimal import Decimal
import warnings
from django import forms
from django.core.exceptions import FieldError
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import ValidationError
from django.db import connection
from django.db.models.query import EmptyQuerySet
from django.forms.models import model_to_dict
from django.utils._os import upath
from django.utils.unittest import skipUnless
from django.test import TestCase
from django.utils import six
from .models import (Article, ArticleStatus, BetterWriter, BigInt, Book,
Category, CommaSeparatedInteger, CustomFieldForExclusionModel, DerivedBook,
DerivedPost, ExplicitPK, FlexibleDatePost, ImprovedArticle,
ImprovedArticleWithParentLink, Inventory, Post, Price,
Product, TextFile, Writer, WriterProfile, Colour, ColourfulItem,
ArticleStatusNote, DateTimePost, CustomErrorMessage, test_images)
if test_images:
from .models import ImageFile, OptionalImageFile
class ImageFileForm(forms.ModelForm):
class Meta:
model = ImageFile
fields = '__all__'
class OptionalImageFileForm(forms.ModelForm):
class Meta:
model = OptionalImageFile
fields = '__all__'
class ProductForm(forms.ModelForm):
class Meta:
model = Product
fields = '__all__'
class PriceForm(forms.ModelForm):
class Meta:
model = Price
fields = '__all__'
class BookForm(forms.ModelForm):
class Meta:
model = Book
fields = '__all__'
class DerivedBookForm(forms.ModelForm):
class Meta:
model = DerivedBook
fields = '__all__'
class ExplicitPKForm(forms.ModelForm):
class Meta:
model = ExplicitPK
fields = ('key', 'desc',)
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = '__all__'
class DateTimePostForm(forms.ModelForm):
class Meta:
model = DateTimePost
fields = '__all__'
class DerivedPostForm(forms.ModelForm):
class Meta:
model = DerivedPost
fields = '__all__'
class CustomWriterForm(forms.ModelForm):
name = forms.CharField(required=False)
class Meta:
model = Writer
fields = '__all__'
class FlexDatePostForm(forms.ModelForm):
class Meta:
model = FlexibleDatePost
fields = '__all__'
class BaseCategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = '__all__'
class ArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = '__all__'
class PartialArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ('headline','pub_date')
class RoykoForm(forms.ModelForm):
class Meta:
model = Writer
fields = '__all__'
class TestArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = '__all__'
class PartialArticleFormWithSlug(forms.ModelForm):
class Meta:
model = Article
fields = ('headline', 'slug', 'pub_date')
class ArticleStatusForm(forms.ModelForm):
class Meta:
model = ArticleStatus
fields = '__all__'
class InventoryForm(forms.ModelForm):
class Meta:
model = Inventory
fields = '__all__'
class SelectInventoryForm(forms.Form):
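# to_field_name makes the choice values (and lookups) use the barcode field
# rather than the primary key.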
items = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
class CustomFieldForExclusionForm(forms.ModelForm):
class Meta:
model = CustomFieldForExclusionModel
fields = ['name', 'markup']
class ShortCategory(forms.ModelForm):
name = forms.CharField(max_length=5)
slug = forms.CharField(max_length=5)
url = forms.CharField(max_length=3)
class Meta:
model = Category
fields = '__all__'
class ImprovedArticleForm(forms.ModelForm):
class Meta:
model = ImprovedArticle
fields = '__all__'
class ImprovedArticleWithParentLinkForm(forms.ModelForm):
class Meta:
model = ImprovedArticleWithParentLink
fields = '__all__'
class BetterWriterForm(forms.ModelForm):
class Meta:
model = BetterWriter
fields = '__all__'
class WriterProfileForm(forms.ModelForm):
class Meta:
model = WriterProfile
fields = '__all__'
class TextFileForm(forms.ModelForm):
class Meta:
model = TextFile
fields = '__all__'
class BigIntForm(forms.ModelForm):
class Meta:
model = BigInt
fields = '__all__'
class ModelFormWithMedia(forms.ModelForm):
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
class Meta:
model = TextFile
fields = '__all__'
class CommaSeparatedIntegerForm(forms.ModelForm):
class Meta:
model = CommaSeparatedInteger
fields = '__all__'
class PriceFormWithoutQuantity(forms.ModelForm):
class Meta:
model = Price
exclude = ('quantity',)
class ColourfulItemForm(forms.ModelForm):
class Meta:
model = ColourfulItem
fields = '__all__'
# model forms for testing work on #9321:
class StatusNoteForm(forms.ModelForm):
class Meta:
model = ArticleStatusNote
fields = '__all__'
class StatusNoteCBM2mForm(forms.ModelForm):
class Meta:
model = ArticleStatusNote
fields = '__all__'
widgets = {'status': forms.CheckboxSelectMultiple}
class CustomErrorMessageForm(forms.ModelForm):
name1 = forms.CharField(error_messages={'invalid': 'Form custom error message.'})
class Meta:
fields = '__all__'
model = CustomErrorMessage
class ModelFormBaseTest(TestCase):
def test_base_form(self):
self.assertEqual(list(BaseCategoryForm.base_fields),
['name', 'slug', 'url'])
def test_missing_fields_attribute(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", PendingDeprecationWarning)
class MissingFieldsForm(forms.ModelForm):
class Meta:
model = Category
# There is some internal state in the warnings module which means that
# if a warning has been seen already, the catch_warnings won't
# have recorded it. The following line therefore will not work reliably:
# self.assertEqual(w[0].category, PendingDeprecationWarning)
# Until end of the deprecation cycle, should still create the
# form as before:
self.assertEqual(list(MissingFieldsForm.base_fields),
['name', 'slug', 'url'])
def test_extra_fields(self):
class ExtraFields(BaseCategoryForm):
some_extra_field = forms.BooleanField()
self.assertEqual(list(ExtraFields.base_fields),
['name', 'slug', 'url', 'some_extra_field'])
def test_replace_field(self):
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = '__all__'
self.assertIsInstance(ReplaceField.base_fields['url'],
forms.fields.BooleanField)
def test_replace_field_variant_2(self):
# Should have the same result as before,
# but 'fields' attribute specified differently
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = ['url']
self.assertIsInstance(ReplaceField.base_fields['url'],
forms.fields.BooleanField)
def test_replace_field_variant_3(self):
# Should have the same result as before,
# but 'fields' attribute specified differently
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = [] # url will still appear, since it is explicit above
self.assertIsInstance(ReplaceField.base_fields['url'],
forms.fields.BooleanField)
def test_override_field(self):
class WriterForm(forms.ModelForm):
book = forms.CharField(required=False)
class Meta:
model = Writer
fields = '__all__'
wf = WriterForm({'name': 'Richard Lockridge'})
self.assertTrue(wf.is_valid())
def test_limit_nonexistent_field(self):
expected_msg = 'Unknown field(s) (nonexistent) specified for Category'
with self.assertRaisesMessage(FieldError, expected_msg):
class InvalidCategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = ['nonexistent']
def test_limit_fields_with_string(self):
expected_msg = "CategoryForm.Meta.fields cannot be a string. Did you mean to type: ('url',)?"
with self.assertRaisesMessage(TypeError, expected_msg):
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = ('url') # note the missing comma
def test_exclude_fields(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ['url']
self.assertEqual(list(ExcludeFields.base_fields),
['name', 'slug'])
def test_exclude_nonexistent_field(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ['nonexistent']
self.assertEqual(list(ExcludeFields.base_fields),
['name', 'slug', 'url'])
def test_exclude_fields_with_string(self):
expected_msg = "CategoryForm.Meta.exclude cannot be a string. Did you mean to type: ('url',)?"
with self.assertRaisesMessage(TypeError, expected_msg):
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
exclude = ('url') # note the missing comma
def test_confused_form(self):
class ConfusedForm(forms.ModelForm):
""" Using 'fields' *and* 'exclude'. Not sure why you'd want to do
this, but uh, "be liberal in what you accept" and all.
"""
class Meta:
model = Category
fields = ['name', 'url']
exclude = ['url']
self.assertEqual(list(ConfusedForm.base_fields),
['name'])
def test_mixmodel_form(self):
class MixModelForm(BaseCategoryForm):
""" Don't allow more than one 'model' definition in the
inheritance hierarchy. Technically, it would generate a valid
form, but the fact that the resulting save method won't deal with
multiple objects is likely to trip up people not familiar with the
mechanics.
"""
class Meta:
model = Article
fields = '__all__'
# MixModelForm is now an Article-related thing, because MixModelForm.Meta
# overrides BaseCategoryForm.Meta.
self.assertEqual(
list(MixModelForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_article_form(self):
self.assertEqual(
list(ArticleForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_bad_form(self):
#First class with a Meta class wins...
class BadForm(ArticleForm, BaseCategoryForm):
pass
self.assertEqual(
list(BadForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_invalid_meta_model(self):
class InvalidModelForm(forms.ModelForm):
class Meta:
pass # no model
# Can't create new form
with self.assertRaises(ValueError):
f = InvalidModelForm()
# Even if you provide a model instance
with self.assertRaises(ValueError):
f = InvalidModelForm(instance=Category)
def test_subcategory_form(self):
class SubCategoryForm(BaseCategoryForm):
""" Subclassing without specifying a Meta on the class will use
the parent's Meta (or the first parent in the MRO if there are
multiple parent classes).
"""
pass
self.assertEqual(list(SubCategoryForm.base_fields),
['name', 'slug', 'url'])
def test_subclassmeta_form(self):
class SomeCategoryForm(forms.ModelForm):
checkbox = forms.BooleanField()
class Meta:
model = Category
fields = '__all__'
class SubclassMeta(SomeCategoryForm):
""" We can also subclass the Meta inner class to change the fields
list.
"""
class Meta(SomeCategoryForm.Meta):
exclude = ['url']
self.assertHTMLEqual(
str(SubclassMeta()),
"""<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_checkbox">Checkbox:</label></th><td><input type="checkbox" name="checkbox" id="id_checkbox" /></td></tr>"""
)
def test_orderfields_form(self):
class OrderFields(forms.ModelForm):
class Meta:
model = Category
fields = ['url', 'name']
self.assertEqual(list(OrderFields.base_fields),
['url', 'name'])
self.assertHTMLEqual(
str(OrderFields()),
"""<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>
<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>"""
)
def test_orderfields2_form(self):
class OrderFields2(forms.ModelForm):
class Meta:
model = Category
fields = ['slug', 'url', 'name']
exclude = ['url']
self.assertEqual(list(OrderFields2.base_fields),
['slug', 'name'])
class FieldOverridesTroughFormMetaForm(forms.ModelForm):
class Meta:
model = Category
fields = ['name', 'url', 'slug']
widgets = {
'name': forms.Textarea,
'url': forms.TextInput(attrs={'class': 'url'})
}
labels = {
'name': 'Title',
}
help_texts = {
'slug': 'Watch out! Letters, numbers, underscores and hyphens only.',
}
error_messages = {
'slug': {
'invalid': (
"Didn't you read the help text? "
"We said letters, numbers, underscores and hyphens only!"
)
}
}
class TestFieldOverridesTroughFormMeta(TestCase):
def test_widget_overrides(self):
form = FieldOverridesTroughFormMetaForm()
self.assertHTMLEqual(
str(form['name']),
'<textarea id="id_name" rows="10" cols="40" name="name"></textarea>',
)
self.assertHTMLEqual(
str(form['url']),
'<input id="id_url" type="text" class="url" name="url" maxlength="40" />',
)
self.assertHTMLEqual(
str(form['slug']),
'<input id="id_slug" type="text" name="slug" maxlength="20" />',
)
def test_label_overrides(self):
form = FieldOverridesTroughFormMetaForm()
self.assertHTMLEqual(
str(form['name'].label_tag()),
'<label for="id_name">Title:</label>',
)
self.assertHTMLEqual(
str(form['url'].label_tag()),
'<label for="id_url">The URL:</label>',
)
self.assertHTMLEqual(
str(form['slug'].label_tag()),
'<label for="id_slug">Slug:</label>',
)
def test_help_text_overrides(self):
form = FieldOverridesTroughFormMetaForm()
self.assertEqual(
form['slug'].help_text,
'Watch out! Letters, numbers, underscores and hyphens only.',
)
def test_error_messages_overrides(self):
form = FieldOverridesTroughFormMetaForm(data={
'name': 'Category',
'url': '/category/',
'slug': '!%#*@',
})
form.full_clean()
error = [
"Didn't you read the help text? "
"We said letters, numbers, underscores and hyphens only!",
]
self.assertEqual(form.errors, {'slug': error})
class IncompleteCategoryFormWithFields(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
fields = ('name', 'slug')
model = Category
class IncompleteCategoryFormWithExclude(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
exclude = ['url']
model = Category
class ValidationTest(TestCase):
def test_validates_with_replaced_field_not_specified(self):
form = IncompleteCategoryFormWithFields(data={'name': 'some name', 'slug': 'some-slug'})
assert form.is_valid()
def test_validates_with_replaced_field_excluded(self):
form = IncompleteCategoryFormWithExclude(data={'name': 'some name', 'slug': 'some-slug'})
assert form.is_valid()
def test_notrequired_overrides_notblank(self):
form = CustomWriterForm({})
assert form.is_valid()
# unique/unique_together validation
class UniqueTest(TestCase):
def setUp(self):
self.writer = Writer.objects.create(name='Mike Royko')
def test_simple_unique(self):
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertTrue(form.is_valid())
obj = form.save()
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Product with this Slug already exists.'])
form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj)
self.assertTrue(form.is_valid())
def test_unique_together(self):
"""ModelForm test of unique_together constraint"""
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertTrue(form.is_valid())
form.save()
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])
def test_unique_null(self):
title = 'I May Be Wrong But I Doubt It'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
def test_inherited_unique(self):
title = 'Boss'
Book.objects.create(title=title, author=self.writer, special_id=1)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'special_id': '1', 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['special_id'], ['Book with this Special id already exists.'])
def test_inherited_unique_together(self):
title = 'Boss'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = DerivedBookForm({'title': title, 'author': self.writer.pk, 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
def test_abstract_inherited_unique(self):
title = 'Boss'
isbn = '12345'
dbook = DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'isbn': isbn})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['isbn'], ['Derived book with this Isbn already exists.'])
def test_abstract_inherited_unique_together(self):
title = 'Boss'
isbn = '12345'
dbook = DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({
'title': 'Other',
'author': self.writer.pk,
'isbn': '9876',
'suffix1': '0',
'suffix2': '0'
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'],
['Derived book with this Suffix1 and Suffix2 already exists.'])
def test_explicitpk_unspecified(self):
"""Test for primary_key being in the form and failing validation."""
form = ExplicitPKForm({'key': '', 'desc': '' })
self.assertFalse(form.is_valid())
def test_explicitpk_unique(self):
"""Ensure keys and blank character strings are tested for uniqueness."""
form = ExplicitPKForm({'key': 'key1', 'desc': ''})
self.assertTrue(form.is_valid())
form.save()
form = ExplicitPKForm({'key': 'key1', 'desc': ''})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 3)
self.assertEqual(form.errors['__all__'], ['Explicit pk with this Key and Desc already exists.'])
self.assertEqual(form.errors['desc'], ['Explicit pk with this Desc already exists.'])
self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
def test_unique_for_date(self):
p = Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
form = PostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released"})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['posted'], ['This field is required.'])
def test_unique_for_date_in_exclude(self):
"""If the date for unique_for_* constraints is excluded from the
ModelForm (in this case 'posted' has editable=False), then the
constraint should be ignored."""
p = DateTimePost.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally",
posted=datetime.datetime(2008, 9, 3, 10, 10, 1))
# 'title' has unique_for_date='posted'
form = DateTimePostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
# 'slug' has unique_for_year='posted'
form = DateTimePostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertTrue(form.is_valid())
# 'subtitle' has unique_for_month='posted'
form = DateTimePostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertTrue(form.is_valid())
def test_inherited_unique_for_date(self):
p = Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
form = DerivedPostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
self.assertTrue(form.is_valid())
def test_unique_for_date_with_nullable_date(self):
p = FlexibleDatePost.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = FlexDatePostForm({'title': "Django 1.0 is released"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'slug': "Django 1.0"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'subtitle': "Finally"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0"}, instance=p)
self.assertTrue(form.is_valid())
class ModelToDictTests(TestCase):
"""
Tests for forms.models.model_to_dict
"""
def test_model_to_dict_many_to_many(self):
categories=[
Category(name='TestName1', slug='TestName1', url='url1'),
Category(name='TestName2', slug='TestName2', url='url2'),
Category(name='TestName3', slug='TestName3', url='url3')
]
for c in categories:
c.save()
writer = Writer(name='Test writer')
writer.save()
art = Article(
headline='Test article',
slug='test-article',
pub_date=datetime.date(1988, 1, 4),
writer=writer,
article='Hello.'
)
art.save()
for c in categories:
art.categories.add(c)
art.save()
with self.assertNumQueries(1):
d = model_to_dict(art)
#Ensure all many-to-many categories appear in model_to_dict
for c in categories:
self.assertIn(c.pk, d['categories'])
#Ensure many-to-many relation appears as a list
self.assertIsInstance(d['categories'], list)
class OldFormForXTests(TestCase):
def test_base_form(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm()
self.assertHTMLEqual(
str(f),
"""<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>"""
)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" /></li>
<li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" /></li>
<li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" /></li>"""
)
self.assertHTMLEqual(
str(f["name"]),
"""<input id="id_name" type="text" name="name" maxlength="20" />""")
def test_auto_id(self):
f = BaseCategoryForm(auto_id=False)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li>Name: <input type="text" name="name" maxlength="20" /></li>
<li>Slug: <input type="text" name="slug" maxlength="20" /></li>
<li>The URL: <input type="text" name="url" maxlength="40" /></li>"""
)
def test_with_data(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm({'name': 'Entertainment',
'slug': 'entertainment',
'url': 'entertainment'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], 'Entertainment')
self.assertEqual(f.cleaned_data['slug'], 'entertainment')
self.assertEqual(f.cleaned_data['url'], 'entertainment')
c1 = f.save()
# Testing whether the same object is returned from the
# ORM... not the fastest way...
self.assertEqual(c1, Category.objects.all()[0])
self.assertEqual(c1.name, "Entertainment")
self.assertEqual(Category.objects.count(), 1)
f = BaseCategoryForm({'name': "It's a test",
'slug': 'its-test',
'url': 'test'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], "It's a test")
self.assertEqual(f.cleaned_data['slug'], 'its-test')
self.assertEqual(f.cleaned_data['url'], 'test')
c2 = f.save()
# Testing whether the same object is returned from the
# ORM... not the fastest way...
self.assertEqual(c2, Category.objects.get(pk=c2.pk))
self.assertEqual(c2.name, "It's a test")
self.assertEqual(Category.objects.count(), 2)
# If you call save() with commit=False, then it will return an object that
# hasn't yet been saved to the database. In this case, it's up to you to call
# save() on the resulting model instance.
f = BaseCategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['url'], 'third')
self.assertEqual(f.cleaned_data['name'], 'Third test')
self.assertEqual(f.cleaned_data['slug'], 'third-test')
c3 = f.save(commit=False)
self.assertEqual(c3.name, "Third test")
self.assertEqual(Category.objects.count(), 2)
c3.save()
self.assertEqual(Category.objects.count(), 3)
# If you call save() with invalid data, you'll get a ValueError.
f = BaseCategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'})
self.assertEqual(f.errors['name'], ['This field is required.'])
self.assertEqual(f.errors['slug'], ["Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."])
self.assertEqual(f.cleaned_data, {'url': 'foo'})
with self.assertRaises(ValueError):
f.save()
f = BaseCategoryForm({'name': '', 'slug': '', 'url': 'foo'})
with self.assertRaises(ValueError):
f.save()
# Create a couple of Writers.
w_royko = Writer(name='Mike Royko')
w_royko.save()
w_woodward = Writer(name='Bob Woodward')
w_woodward.save()
# ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any
# fields with the 'choices' attribute are represented by a ChoiceField.
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(six.text_type(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>
<tr><th>Writer:</th><td><select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></td></tr>
<tr><th>Article:</th><td><textarea rows="10" cols="40" name="article"></textarea></td></tr>
<tr><th>Categories:</th><td><select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select><br /><span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></td></tr>
<tr><th>Status:</th><td><select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></td></tr>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
# You can restrict a form to a subset of the complete list of fields
# by providing a 'fields' argument. If you try to save a
# model created with such a form, you need to ensure that the fields
# that are _not_ on the form have default values, or are allowed to have
# a value of None. If a field isn't specified on a form, the object created
# from the form can't provide a value for that field!
f = PartialArticleForm(auto_id=False)
self.assertHTMLEqual(six.text_type(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>''')
# When the ModelForm is passed an instance, that instance's current values are
# inserted as 'initial' data in each Field.
w = Writer.objects.get(name='Mike Royko')
f = RoykoForm(auto_id=False, instance=w)
self.assertHTMLEqual(six.text_type(f), '''<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" /><br /><span class="helptext">Use both first and last names.</span></td></tr>''')
art = Article(
headline='Test article',
slug='test-article',
pub_date=datetime.date(1988, 1, 4),
writer=w,
article='Hello.'
)
art.save()
art_id_1 = art.id
self.assertEqual(art_id_1 is not None, True)
f = TestArticleForm(auto_id=False, instance=art)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
f = TestArticleForm({
'headline': 'Test headline',
'slug': 'test-headline',
'pub_date': '1984-02-06',
'writer': six.text_type(w_royko.pk),
'article': 'Hello.'
}, instance=art)
self.assertEqual(f.errors, {})
self.assertEqual(f.is_valid(), True)
test_art = f.save()
self.assertEqual(test_art.id == art_id_1, True)
test_art = Article.objects.get(id=art_id_1)
self.assertEqual(test_art.headline, 'Test headline')
# You can create a form over a subset of the available fields
# by specifying a 'fields' argument to form_for_instance.
f = PartialArticleFormWithSlug({
'headline': 'New headline',
'slug': 'new-headline',
'pub_date': '1988-01-04'
}, auto_id=False, instance=art)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>''')
self.assertEqual(f.is_valid(), True)
new_art = f.save()
self.assertEqual(new_art.id == art_id_1, True)
new_art = Article.objects.get(id=art_id_1)
self.assertEqual(new_art.headline, 'New headline')
# Add some categories and test the many-to-many form output.
self.assertQuerysetEqual(new_art.categories.all(), [])
new_art.categories.add(Category.objects.get(name='Entertainment'))
self.assertQuerysetEqual(new_art.categories.all(), ["Entertainment"])
f = TestArticleForm(auto_id=False, instance=new_art)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
# Initial values can be provided for model forms
f = TestArticleForm(
auto_id=False,
initial={
'headline': 'Your headline here',
'categories': [str(c1.id), str(c2.id)]
})
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s" selected="selected">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
f = TestArticleForm({
'headline': 'New headline',
'slug': 'new-headline',
'pub_date': '1988-01-04',
'writer': six.text_type(w_royko.pk),
'article': 'Hello.',
'categories': [six.text_type(c1.id), six.text_type(c2.id)]
}, instance=new_art)
new_art = f.save()
self.assertEqual(new_art.id == art_id_1, True)
new_art = Article.objects.get(id=art_id_1)
self.assertQuerysetEqual(new_art.categories.order_by('name'),
["Entertainment", "It's a test"])
# Now, submit form data with no categories. This deletes the existing categories.
f = TestArticleForm({'headline': 'New headline', 'slug': 'new-headline', 'pub_date': '1988-01-04',
'writer': six.text_type(w_royko.pk), 'article': 'Hello.'}, instance=new_art)
new_art = f.save()
self.assertEqual(new_art.id == art_id_1, True)
new_art = Article.objects.get(id=art_id_1)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Create a new article, with categories, via the form.
f = ArticleForm({'headline': 'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': '1967-11-01',
'writer': six.text_type(w_royko.pk), 'article': 'Test.', 'categories': [six.text_type(c1.id), six.text_type(c2.id)]})
new_art = f.save()
art_id_2 = new_art.id
self.assertEqual(art_id_2 not in (None, art_id_1), True)
new_art = Article.objects.get(id=art_id_2)
self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"])
# Create a new article, with no categories, via the form.
f = ArticleForm({'headline': 'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': '1967-11-01',
'writer': six.text_type(w_royko.pk), 'article': 'Test.'})
new_art = f.save()
art_id_3 = new_art.id
self.assertEqual(art_id_3 not in (None, art_id_1, art_id_2), True)
new_art = Article.objects.get(id=art_id_3)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Create a new article, with categories, via the form, but use commit=False.
# The m2m data won't be saved until save_m2m() is invoked on the form.
f = ArticleForm({'headline': 'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': '1967-11-01',
'writer': six.text_type(w_royko.pk), 'article': 'Test.', 'categories': [six.text_type(c1.id), six.text_type(c2.id)]})
new_art = f.save(commit=False)
# Manually save the instance
new_art.save()
art_id_4 = new_art.id
self.assertEqual(art_id_4 not in (None, art_id_1, art_id_2, art_id_3), True)
# The instance doesn't have m2m data yet
new_art = Article.objects.get(id=art_id_4)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Save the m2m data on the form
f.save_m2m()
self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"])
# Here, we define a custom ModelForm. Because it happens to have the same fields as
# the Category model, we can just call the form's save() to apply its changes to an
# existing Category instance.
cat = Category.objects.get(name='Third test')
self.assertEqual(cat.name, "Third test")
self.assertEqual(cat.id == c3.id, True)
form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat)
self.assertEqual(form.save().name, 'Third')
self.assertEqual(Category.objects.get(id=c3.id).name, 'Third')
# Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
# at runtime, based on the data in the database when the form is displayed, not
# the data in the database when the form is instantiated.
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
c4 = Category.objects.create(name='Fourth', url='4th')
self.assertEqual(c4.name, 'Fourth')
w_bernstein = Writer.objects.create(name='Carl Bernstein')
self.assertEqual(w_bernstein.name, 'Carl Bernstein')
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third</option>
<option value="%s">Fourth</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_bernstein.pk, w_royko.pk, c1.pk, c2.pk, c3.pk, c4.pk))
# ModelChoiceField ############################################################
f = forms.ModelChoiceField(Category.objects.all())
self.assertEqual(list(f.choices), [
('', '---------'),
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third'),
(c4.pk, 'Fourth')])
self.assertEqual(5, len(f.choices))
with self.assertRaises(ValidationError):
f.clean('')
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean(0)
self.assertEqual(f.clean(c3.id).name, 'Third')
self.assertEqual(f.clean(c2.id).name, "It's a test")
# Add a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
c5 = Category.objects.create(name='Fifth', url='5th')
self.assertEqual(c5.name, 'Fifth')
self.assertEqual(f.clean(c5.id).name, 'Fifth')
# Delete a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
Category.objects.get(url='5th').delete()
with self.assertRaises(ValidationError):
f.clean(c5.id)
f = forms.ModelChoiceField(Category.objects.filter(pk=c1.id), required=False)
self.assertEqual(f.clean(''), None)
f.clean('')
self.assertEqual(f.clean(str(c1.id)).name, "Entertainment")
with self.assertRaises(ValidationError):
f.clean('100')
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name='Fourth')
self.assertEqual(list(f.choices), [
('', '---------'),
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third')])
self.assertEqual(f.clean(c3.id).name, 'Third')
with self.assertRaises(ValidationError):
f.clean(c4.id)
# check that we can safely iterate choices repeatedly
gen_one = list(f.choices)
gen_two = f.choices
self.assertEqual(gen_one[2], (c2.pk, "It's a test"))
self.assertEqual(list(gen_two), [
('', '---------'),
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third')])
# check that we can override the label_from_instance method to print custom labels (#4620)
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "category " + str(obj)
self.assertEqual(list(f.choices), [
('', '---------'),
(c1.pk, 'category Entertainment'),
(c2.pk, "category It's a test"),
(c3.pk, 'category Third'),
(c4.pk, 'category Fourth')])
# ModelMultipleChoiceField ####################################################
f = forms.ModelMultipleChoiceField(Category.objects.all())
self.assertEqual(list(f.choices), [
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third'),
(c4.pk, 'Fourth')])
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean([])
self.assertQuerysetEqual(f.clean([c1.id]), ["Entertainment"])
self.assertQuerysetEqual(f.clean([c2.id]), ["It's a test"])
self.assertQuerysetEqual(f.clean([str(c1.id)]), ["Entertainment"])
self.assertQuerysetEqual(f.clean([str(c1.id), str(c2.id)]), ["Entertainment", "It's a test"],
ordered=False)
self.assertQuerysetEqual(f.clean([c1.id, str(c2.id)]), ["Entertainment", "It's a test"],
ordered=False)
self.assertQuerysetEqual(f.clean((c1.id, str(c2.id))), ["Entertainment", "It's a test"],
ordered=False)
with self.assertRaises(ValidationError):
f.clean(['100'])
with self.assertRaises(ValidationError):
f.clean('hello')
with self.assertRaises(ValidationError):
f.clean(['fail'])
# Add a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
# Note, we are using an id of 1006 here since tests that run before
# this may create categories with primary keys up to 6. Use
        # a number that will not conflict.
c6 = Category.objects.create(id=1006, name='Sixth', url='6th')
self.assertEqual(c6.name, 'Sixth')
self.assertQuerysetEqual(f.clean([c6.id]), ["Sixth"])
# Delete a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
Category.objects.get(url='6th').delete()
with self.assertRaises(ValidationError):
f.clean([c6.id])
f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False)
self.assertIsInstance(f.clean([]), EmptyQuerySet)
self.assertIsInstance(f.clean(()), EmptyQuerySet)
with self.assertRaises(ValidationError):
f.clean(['10'])
with self.assertRaises(ValidationError):
f.clean([str(c3.id), '10'])
with self.assertRaises(ValidationError):
f.clean([str(c1.id), '10'])
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name='Fourth')
self.assertEqual(list(f.choices), [
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third')])
self.assertQuerysetEqual(f.clean([c3.id]), ["Third"])
with self.assertRaises(ValidationError):
f.clean([c4.id])
with self.assertRaises(ValidationError):
f.clean([str(c3.id), str(c4.id)])
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "multicategory " + str(obj)
self.assertEqual(list(f.choices), [
(c1.pk, 'multicategory Entertainment'),
(c2.pk, "multicategory It's a test"),
(c3.pk, 'multicategory Third'),
(c4.pk, 'multicategory Fourth')])
# OneToOneField ###############################################################
self.assertEqual(list(ImprovedArticleForm.base_fields), ['article'])
self.assertEqual(list(ImprovedArticleWithParentLinkForm.base_fields), [])
bw = BetterWriter(name='Joe Better', score=10)
bw.save()
self.assertEqual(sorted(model_to_dict(bw)),
['id', 'name', 'score', 'writer_ptr'])
form = BetterWriterForm({'name': 'Some Name', 'score': 12})
self.assertEqual(form.is_valid(), True)
bw2 = form.save()
bw2.delete()
form = WriterProfileForm()
self.assertHTMLEqual(form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Joe Better</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="number" name="age" id="id_age" min="0" /></p>''' % (w_woodward.pk, w_bernstein.pk, bw.pk, w_royko.pk))
data = {
'writer': six.text_type(w_woodward.pk),
'age': '65',
}
form = WriterProfileForm(data)
instance = form.save()
self.assertEqual(six.text_type(instance), 'Bob Woodward is 65')
form = WriterProfileForm(instance=instance)
self.assertHTMLEqual(form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="">---------</option>
<option value="%s" selected="selected">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Joe Better</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="number" name="age" value="65" id="id_age" min="0" /></p>''' % (w_woodward.pk, w_bernstein.pk, bw.pk, w_royko.pk))
def test_file_field(self):
# Test conditions when files is either not given or empty.
f = TextFileForm(data={'description': 'Assistance'})
self.assertEqual(f.is_valid(), False)
f = TextFileForm(data={'description': 'Assistance'}, files={})
self.assertEqual(f.is_valid(), False)
# Upload a file and ensure it all works as expected.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
instance.file.delete()
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Check if the max_length attribute has been inherited from the model.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test-maxlength.txt', b'hello world')})
self.assertEqual(f.is_valid(), False)
# Edit an instance that already has the file defined in the model. This will not
# save the file again, but leave it exactly as it is.
f = TextFileForm(
data={'description': 'Assistance'},
instance=instance)
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['file'].name, 'tests/test1.txt')
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
# Override the file by uploading a new one.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test2.txt', b'hello world')}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test2.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test2.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test2.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
# Test the non-required FileField
f = TextFileForm(data={'description': 'Assistance'})
f.fields['file'].required = False
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, '')
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test3.txt', b'hello world')}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Instance can be edited w/out re-uploading the file and existing file should be preserved.
f = TextFileForm(
data={'description': 'New Description'},
instance=instance)
f.fields['file'].required = False
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.description, 'New Description')
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test3.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
def test_big_integer_field(self):
bif = BigIntForm({'biggie': '-9223372036854775808'})
self.assertEqual(bif.is_valid(), True)
bif = BigIntForm({'biggie': '-9223372036854775809'})
self.assertEqual(bif.is_valid(), False)
self.assertEqual(bif.errors, {'biggie': ['Ensure this value is greater than or equal to -9223372036854775808.']})
bif = BigIntForm({'biggie': '9223372036854775807'})
self.assertEqual(bif.is_valid(), True)
bif = BigIntForm({'biggie': '9223372036854775808'})
self.assertEqual(bif.is_valid(), False)
self.assertEqual(bif.errors, {'biggie': ['Ensure this value is less than or equal to 9223372036854775807.']})
@skipUnless(test_images, "PIL not installed")
def test_image_field(self):
        # ImageField and FileField are nearly identical, but they differ slightly when
# it comes to validation. This specifically tests that #6302 is fixed for
# both file fields and image fields.
with open(os.path.join(os.path.dirname(upath(__file__)), "test.png"), 'rb') as fp:
image_data = fp.read()
with open(os.path.join(os.path.dirname(upath(__file__)), "test2.png"), 'rb') as fp:
image_data2 = fp.read()
f = ImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
f = ImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Edit an instance that already has the (required) image defined in the model. This will not
# save the image again, but leave it exactly as it is.
f = ImageFileForm(data={'description': 'Look, it changed'}, instance=instance)
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['image'].name, 'tests/test.png')
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.height, 16)
self.assertEqual(instance.width, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
# Override the file by uploading a new one.
f = ImageFileForm(
data={'description': 'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
f = ImageFileForm(
data={'description': 'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
# Test the non-required ImageField
# Note: In Oracle, we expect a null ImageField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_imagefield_repr = ''
else:
expected_null_imagefield_repr = None
f = OptionalImageFileForm(data={'description': 'Test'})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, expected_null_imagefield_repr)
self.assertEqual(instance.width, None)
self.assertEqual(instance.height, None)
f = OptionalImageFileForm(
data={'description': 'And a final one'},
files={'image': SimpleUploadedFile('test3.png', image_data)}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Editing the instance without re-uploading the image should not affect the image or its width/height properties
f = OptionalImageFileForm(
data={'description': 'New Description'},
instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.description, 'New Description')
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django.
instance.image.delete()
instance.delete()
f = OptionalImageFileForm(
data={'description': 'And a final one'},
files={'image': SimpleUploadedFile('test4.png', image_data2)}
)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test4.png')
self.assertEqual(instance.width, 48)
self.assertEqual(instance.height, 32)
instance.delete()
# Test callable upload_to behavior that's dependent on the value of another field in the model
f = ImageFileForm(
data={'description': 'And a final one', 'path': 'foo'},
files={'image': SimpleUploadedFile('test4.png', image_data)})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'foo/test4.png')
instance.delete()
def test_media_on_modelform(self):
# Similar to a regular Form class you can define custom media to be used on
# the ModelForm.
f = ModelFormWithMedia()
self.assertHTMLEqual(six.text_type(f.media), '''<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/form/javascript"></script>''')
f = CommaSeparatedIntegerForm({'field': '1,2,3'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': '1,2,3'})
f = CommaSeparatedIntegerForm({'field': '1a,2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': ',,,,'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': ',,,,'})
f = CommaSeparatedIntegerForm({'field': '1.2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': '1,a,2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': '1,,2'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': '1,,2'})
f = CommaSeparatedIntegerForm({'field': '1'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': '1'})
# This Price instance generated by this form is not valid because the quantity
# field is required, but the form is valid because the field is excluded from
# the form. This is for backwards compatibility.
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertEqual(form.is_valid(), True)
price = form.save(commit=False)
with self.assertRaises(ValidationError):
price.full_clean()
# The form should not validate fields that it doesn't contain even if they are
# specified using 'fields', not 'exclude'.
class Meta:
model = Price
fields = ('price',)
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertEqual(form.is_valid(), True)
# The form should still have an instance of a model that is not complete and
# not saved into a DB yet.
self.assertEqual(form.instance.price, Decimal('6.00'))
self.assertEqual(form.instance.quantity is None, True)
self.assertEqual(form.instance.pk is None, True)
# Choices on CharField and IntegerField
f = ArticleForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('42')
f = ArticleStatusForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('z')
def test_foreignkeys_which_use_to_field(self):
apple = Inventory.objects.create(barcode=86, name='Apple')
pear = Inventory.objects.create(barcode=22, name='Pear')
core = Inventory.objects.create(barcode=87, name='Core', parent=apple)
field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), (
('', '---------'),
(86, 'Apple'),
(87, 'Core'),
(22, 'Pear')))
form = InventoryForm(instance=core)
self.assertHTMLEqual(six.text_type(form['parent']), '''<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected="selected">Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>''')
data = model_to_dict(core)
data['parent'] = '22'
form = InventoryForm(data=data, instance=core)
core = form.save()
self.assertEqual(core.parent.name, 'Pear')
class CategoryForm(forms.ModelForm):
description = forms.CharField()
class Meta:
model = Category
fields = ['description', 'url']
self.assertEqual(list(CategoryForm.base_fields),
['description', 'url'])
self.assertHTMLEqual(six.text_type(CategoryForm()), '''<tr><th><label for="id_description">Description:</label></th><td><input type="text" name="description" id="id_description" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>''')
# to_field_name should also work on ModelMultipleChoiceField ##################
field = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), ((86, 'Apple'), (87, 'Core'), (22, 'Pear')))
self.assertQuerysetEqual(field.clean([86]), ['Apple'])
form = SelectInventoryForm({'items': [87, 22]})
self.assertEqual(form.is_valid(), True)
self.assertEqual(len(form.cleaned_data), 1)
self.assertQuerysetEqual(form.cleaned_data['items'], ['Core', 'Pear'])
def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self):
self.assertEqual(list(CustomFieldForExclusionForm.base_fields),
['name'])
self.assertHTMLEqual(six.text_type(CustomFieldForExclusionForm()),
'''<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="10" /></td></tr>''')
    def test_iterable_model_m2m(self):
colour = Colour.objects.create(name='Blue')
form = ColourfulItemForm()
self.maxDiff = 1024
self.assertHTMLEqual(
form.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="50" /></p>
<p><label for="id_colours">Colours:</label> <select multiple="multiple" name="colours" id="id_colours">
<option value="%(blue_pk)s">Blue</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></p>"""
% {'blue_pk': colour.pk})
    def test_custom_error_messages(self):
data = {'name1': '@#$!!**@#$', 'name2': '@#$!!**@#$'}
errors = CustomErrorMessageForm(data).errors
self.assertHTMLEqual(
str(errors['name1']),
'<ul class="errorlist"><li>Form custom error message.</li></ul>'
)
self.assertHTMLEqual(
str(errors['name2']),
'<ul class="errorlist"><li>Model custom error message.</li></ul>'
)
class M2mHelpTextTest(TestCase):
"""Tests for ticket #9321."""
def test_multiple_widgets(self):
"""Help text of different widgets for ManyToManyFields model fields"""
dreaded_help_text = '<span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span>'
# Default widget (SelectMultiple):
std_form = StatusNoteForm()
self.assertInHTML(dreaded_help_text, std_form.as_p())
# Overridden widget (CheckboxSelectMultiple, a subclass of
# SelectMultiple but with a UI that doesn't involve Control/Command
# keystrokes to extend selection):
form = StatusNoteCBM2mForm()
html = form.as_p()
self.assertInHTML('<ul id="id_status">', html)
self.assertInHTML(dreaded_help_text, html, count=0)
|
{
"content_hash": "098b6c193a5f4c6d82aff79037bcedaf",
"timestamp": "",
"source": "github",
"line_count": 1796,
"max_line_length": 219,
"avg_line_length": 42.224387527839646,
"alnum_prop": 0.5999868134766269,
"repo_name": "dex4er/django",
"id": "e33372f0badd20317feddda7edbb272d0c5b02cb",
"size": "75835",
"binary": false,
"copies": "10",
"ref": "refs/heads/1.6.x",
"path": "tests/model_forms/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52958"
},
{
"name": "JavaScript",
"bytes": "102431"
},
{
"name": "Python",
"bytes": "9528070"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
__all__ = ['DocSection', 'Endpoint', 'StatsMixin']
import time
from datetime import datetime, timedelta
from django.conf import settings
from django.utils.http import urlquote
from django.views.decorators.csrf import csrf_exempt
from enum import Enum
from pytz import utc
from rest_framework.authentication import SessionAuthentication
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from sentry.app import raven, tsdb
from sentry.models import ApiKey, AuditLogEntry
from sentry.utils.cursors import Cursor
from sentry.utils.http import is_valid_origin
from .authentication import ApiKeyAuthentication, ProjectKeyAuthentication
from .paginator import Paginator
from .permissions import NoPermission
ONE_MINUTE = 60
ONE_HOUR = ONE_MINUTE * 60
ONE_DAY = ONE_HOUR * 24
LINK_HEADER = '<{uri}&cursor={cursor}>; rel="{name}"; results="{has_results}"; cursor="{cursor}"'
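# Illustrative rendering of LINK_HEADER (hypothetical URI and cursor values):
# <https://sentry.example.com/api/0/projects/?foo=bar&cursor=0:100:1>; rel="previous"; results="false"; cursor="0:100:1"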
DEFAULT_AUTHENTICATION = (
ApiKeyAuthentication,
ProjectKeyAuthentication,
SessionAuthentication
)
class DocSection(Enum):
ACCOUNTS = 'Accounts'
EVENTS = 'Events'
ORGANIZATIONS = 'Organizations'
PROJECTS = 'Projects'
RELEASES = 'Releases'
TEAMS = 'Teams'
class Endpoint(APIView):
authentication_classes = DEFAULT_AUTHENTICATION
renderer_classes = (JSONRenderer,)
parser_classes = (JSONParser,)
permission_classes = (NoPermission,)
def build_cursor_link(self, request, name, cursor):
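        # Builds a single Link-header entry for the given cursor, carrying over
        # every query-string parameter except 'cursor' itself.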
querystring = u'&'.join(
u'{0}={1}'.format(urlquote(k), urlquote(v))
for k, v in request.GET.iteritems()
if k != 'cursor'
)
base_url = request.build_absolute_uri(request.path)
if querystring:
base_url = '{0}?{1}'.format(base_url, querystring)
else:
base_url = base_url + '?'
return LINK_HEADER.format(
uri=base_url,
cursor=str(cursor),
name=name,
has_results='true' if bool(cursor) else 'false',
)
def convert_args(self, request, *args, **kwargs):
return (args, kwargs)
def handle_exception(self, request, exc):
try:
return super(Endpoint, self).handle_exception(exc)
except Exception as exc:
import sys
import traceback
sys.stderr.write(traceback.format_exc())
event = raven.captureException(request=request)
if event:
event_id = raven.get_ident(event)
else:
event_id = None
context = {
'detail': 'Internal Error',
'errorId': event_id,
}
return Response(context, status=500)
def create_audit_entry(self, request, **kwargs):
user = request.user if request.user.is_authenticated() else None
api_key = request.auth if isinstance(request.auth, ApiKey) else None
AuditLogEntry.objects.create(
actor=user,
actor_key=api_key,
ip_address=request.META['REMOTE_ADDR'],
**kwargs
)
@csrf_exempt
def dispatch(self, request, *args, **kwargs):
"""
Identical to rest framework's dispatch except we add the ability
to convert arguments (for common URL params).
"""
self.args = args
self.kwargs = kwargs
request = self.initialize_request(request, *args, **kwargs)
self.request = request
self.headers = self.default_response_headers # deprecate?
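        # SENTRY_API_RESPONSE_DELAY is expressed in milliseconds; when set it adds
        # an artificial delay to every API response (presumably for testing).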
if settings.SENTRY_API_RESPONSE_DELAY:
time.sleep(settings.SENTRY_API_RESPONSE_DELAY / 1000.0)
try:
self.initial(request, *args, **kwargs)
# Get the appropriate handler method
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(),
self.http_method_not_allowed)
(args, kwargs) = self.convert_args(request, *args, **kwargs)
self.args = args
self.kwargs = kwargs
else:
handler = self.http_method_not_allowed
response = handler(request, *args, **kwargs)
except Exception as exc:
response = self.handle_exception(request, exc)
self.response = self.finalize_response(request, response, *args, **kwargs)
return self.response
def finalize_response(self, request, response, *args, **kwargs):
response = super(Endpoint, self).finalize_response(
request, response, *args, **kwargs
)
self.add_cors_headers(request, response)
return response
def add_cors_headers(self, request, response):
if not request.auth:
return
origin = request.META.get('HTTP_ORIGIN')
if not origin:
return
allowed_origins = request.auth.get_allowed_origins()
if is_valid_origin(origin, allowed=allowed_origins):
response['Access-Control-Allow-Origin'] = origin
response['Access-Control-Allow-Methods'] = ', '.join(self.http_method_names)
return
def paginate(self, request, on_results=None, paginator_cls=Paginator,
**kwargs):
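        # Cursor-based pagination helper: reads 'cursor' and 'per_page' from the
        # query string, asserts the page size does not exceed 100, and emits
        # previous/next Link headers built from the paginator's result.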
per_page = int(request.GET.get('per_page', 100))
input_cursor = request.GET.get('cursor')
if input_cursor:
input_cursor = Cursor.from_string(input_cursor)
assert per_page <= 100
paginator = paginator_cls(**kwargs)
cursor_result = paginator.get_result(
limit=per_page,
cursor=input_cursor,
)
        # map results based on callback; fall back to the raw queryset results
        # so 'results' is always defined
        if on_results:
            results = on_results(cursor_result.results)
        else:
            results = cursor_result.results
headers = {}
headers['Link'] = ', '.join([
self.build_cursor_link(request, 'previous', cursor_result.prev),
self.build_cursor_link(request, 'next', cursor_result.next),
])
return Response(results, headers=headers)
class StatsMixin(object):
def _parse_args(self, request):
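        # Builds the tsdb query window from the request: 'since'/'until' are unix
        # timestamps (defaulting to the trailing 24 hours) and 'resolution', when
        # given, must match one of the configured tsdb rollups.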
resolution = request.GET.get('resolution')
if resolution:
resolution = self._parse_resolution(resolution)
assert any(r for r in tsdb.rollups if r[0] == resolution)
end = request.GET.get('until')
if end:
end = datetime.fromtimestamp(float(end)).replace(tzinfo=utc)
else:
end = datetime.utcnow().replace(tzinfo=utc)
start = request.GET.get('since')
if start:
start = datetime.fromtimestamp(float(start)).replace(tzinfo=utc)
else:
start = end - timedelta(days=1, seconds=-1)
return {
'start': start,
'end': end,
'rollup': resolution,
}
def _parse_resolution(self, value):
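        # Converts a shorthand resolution such as '30s', '15m', '2h' or '1d' into
        # seconds (for example '15m' -> 900 and '1d' -> 86400); anything else
        # raises ValueError.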
if value.endswith('h'):
return int(value[:-1]) * ONE_HOUR
elif value.endswith('d'):
return int(value[:-1]) * ONE_DAY
elif value.endswith('m'):
return int(value[:-1]) * ONE_MINUTE
elif value.endswith('s'):
return int(value[:-1])
else:
raise ValueError(value)
|
{
"content_hash": "412831baf79f360d0beeba7a05931620",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 97,
"avg_line_length": 31.71244635193133,
"alnum_prop": 0.6012992285830289,
"repo_name": "vperron/sentry",
"id": "1891d3b4236aedb550a83e9b90af60e649888a61",
"size": "7389",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/api/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "583430"
},
{
"name": "HTML",
"bytes": "314106"
},
{
"name": "JavaScript",
"bytes": "624672"
},
{
"name": "Makefile",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "6274238"
}
],
"symlink_target": ""
}
|
import qhsm
from qhsm import QSignals, QEvent
# generated by PythonGenerator version 0.1
class TestSample1(qhsm.QHsm):
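    # Small hierarchical state machine: StateX initialises into State0, and the
    # "Hello"/"Bye" signals drive the transitions between State0 and State1.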
def initialiseStateMachine(self):
self.initialiseState(self.s_StateX)
def s_StateX(self, ev):
if ev.QSignal == QSignals.Entry:
self.enterStateX()
elif ev.QSignal == QSignals.Exit:
self.exitStateX()
elif ev.QSignal == QSignals.Init:
self.initialiseState(self.s_State0)
else:
return self._TopState
return None
def s_State0(self, ev):
if ev.QSignal == "Bye":
self.transitionTo(self.s_State1)
elif ev.QSignal == "Hello":
if self.Ok(ev):
self.sayHello3()
self.transitionTo(self.s_State0)
else:
self.sayHello1()
self.transitionTo(self.s_State1)
elif ev.QSignal == QSignals.Entry:
self.enterState0()
elif ev.QSignal == QSignals.Exit:
self.exitState0()
else:
return self.s_StateX
return None
def s_State1(self, ev):
if ev.QSignal == "Hello":
self.sayHello2()
self.transitionTo(self.s_State0)
elif ev.QSignal == QSignals.Entry:
self.enterState1()
elif ev.QSignal == QSignals.Exit:
self.exitState1()
else:
return self._TopState
return None
#end of TestSample1
pass
|
{
"content_hash": "8fc0a6c033603135ffd4fe1dc2af855b",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 48,
"avg_line_length": 25.96551724137931,
"alnum_prop": 0.547808764940239,
"repo_name": "poobalan-arumugam/stateproto",
"id": "9d7cc0c21fb675fa25cd867f98fa29dddb573119",
"size": "1506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/extensions/lang/python/qhsm/testsamplehsm1.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "C#",
"bytes": "503476"
},
{
"name": "HTML",
"bytes": "1290"
},
{
"name": "JavaScript",
"bytes": "14591"
},
{
"name": "Python",
"bytes": "48645"
},
{
"name": "Ruby",
"bytes": "11512"
},
{
"name": "Shell",
"bytes": "122"
}
],
"symlink_target": ""
}
|
import unittest
@unittest.skip("segfault ....")
def omp_parallel_num_threads():
import omp
max_threads = 0
failed = 0
if 'omp parallel':
if 'omp master':
max_threads = omp.get_num_threads()
for threads in xrange(1, max_threads + 1):
nthreads = 0
if 'omp parallel reduction(+:failed) num_threads(threads)':
failed += (threads != omp.get_num_threads())
'omp atomic'
nthreads += 1
failed += (nthreads != threads)
return not failed
|
{
"content_hash": "1f3a2d7271b393a6d69fe0f8356781b7",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 67,
"avg_line_length": 25.8,
"alnum_prop": 0.5581395348837209,
"repo_name": "artas360/pythran",
"id": "2581cac9ae59ff292e77b2c3cc8c99bb84272675",
"size": "516",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pythran/tests/openmp.legacy/omp_parallel_num_threads.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1335689"
},
{
"name": "Makefile",
"bytes": "1185"
},
{
"name": "Python",
"bytes": "1162293"
},
{
"name": "Shell",
"bytes": "250"
}
],
"symlink_target": ""
}
|
from oslo_log import log as logging
from nca47.common.exception import NonExistParam, ParamNull, ParamValueError
from nca47.common.i18n import _
from nca47.common.i18n import _LE
from nca47.manager import central
from oslo_messaging.exceptions import MessagingException
from nca47.common.exception import BadRequest
from nca47.common.exception import Nca47Exception
from oslo_serialization import jsonutils as json
from nca47.api.controllers.v1 import tools
from nca47.api.controllers.v1.firewall import fw_base
LOG = logging.getLogger(__name__)
class AddrObjController(fw_base.BaseRestController):
"""
    nca47 addrobj controller, used to add/delete/update/query addrobj info.
    It validates whether the input parameters are legal, handles the DB
    operations and calls the rpc client's corresponding method to send
    messages to the agent endpoints.
"""
def __init__(self):
self.manager = central.CentralManager.get_instance()
super(AddrObjController, self).__init__()
def create(self, req, *args, **kwargs):
"""create the addrobj"""
url = req.url
try:
# get the right url
if len(args) != 1:
raise BadRequest(resource="addrobj operation", msg=url)
# get the body
json_body = req.body
# get the context
context = req.context
values = json.loads(json_body)
# check the in values
valid_attributes = ['tenant_id', 'dc_name', 'network_zone',
'ip', 'name', 'vfwname']
# check the in values
recom_msg = self.validat_values(values, valid_attributes)
LOG.info(_("the in value body is %(body)s"), {"body": values})
# from rpc server create the addrobj in db and device
addrobj = self.manager.add_addrobj(context, recom_msg)
except Nca47Exception as e:
self.response.status = e.code
            LOG.error(_LE('Error exception! error info: %s'), e.message)
LOG.exception(e)
return tools.ret_info(e.code, e.message)
except MessagingException as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as exception:
LOG.exception(exception)
self.response.status = 500
message = "the values of the body format error"
return tools.ret_info(self.response.status, message)
return addrobj
def remove(self, req, *args, **kwargs):
"""del the addrobj"""
url = req.url
try:
if len(args) != 1:
raise BadRequest(resource="addrobj operation", msg=url)
# get the body
json_body = req.body
# get the context
context = req.context
values = json.loads(json_body)
# check the in values
valid_attributes = ['tenant_id', 'dc_name', 'id', 'network_zone',
'vfwname']
# check the in values
recom_msg = self.validat_values(values, valid_attributes)
LOG.info(_("the in value body is %(body)s"), {"body": values})
# from rpc server delete the addrobj in db and device
addrobj = self.manager.del_addrobj(context, recom_msg)
except Nca47Exception as e:
self.response.status = e.code
            LOG.error(_LE('Error exception! error info: %s'), e.message)
LOG.exception(e)
return tools.ret_info(e.code, e.message)
except MessagingException as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as exception:
LOG.exception(exception)
self.response.status = 500
message = "the values of the body format error"
return tools.ret_info(self.response.status, message)
return addrobj
def show(self, req, *args, **kwargs):
"""get the one addrobj"""
url = req.url
try:
# get the right url
if len(args) != 1:
raise BadRequest(resource="addrobj operation", msg=url)
# get the body
json_body = req.body
# get the context
context = req.context
values = json.loads(json_body)
# check the in values
valid_attributes = ['id']
# check the in values
recom_msg = self.validat_values(values, valid_attributes)
LOG.info(_("the in value body is %(body)s"), {"body": values})
# from rpc server get the addrobj in db and device
addrobj = self.manager.get_addrobj(context, recom_msg)
except Nca47Exception as e:
self.response.status = e.code
            LOG.error(_LE('Error exception! error info: %s'), e.message)
LOG.exception(e)
return tools.ret_info(e.code, e.message)
except MessagingException as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as exception:
LOG.exception(exception)
self.response.status = 500
message = "the values of the body format error"
return tools.ret_info(self.response.status, message)
return addrobj
def list(self, req, *args, **kwargs):
"""get the all addrobj"""
url = req.url
addrobj_name_list = []
try:
# get the right url
if len(args) != 1:
raise BadRequest(resource="addrobj operation", msg=url)
# get the body
json_body = req.body
# get the context
context = req.context
values = json.loads(json_body)
# check the in values
# ADTEC_request: should be vfwname, not vfw_id
valid_attributes = ['vfwname', 'tenant_id', 'dc_name',
'network_zone']
# check the in values
recom_msg = self.validat_values(values, valid_attributes)
LOG.info(_("the in value body is %(body)s"), {"body": values})
# from rpc server get the addrobj in db and device
addrobjs = self.manager.get_addrobjs(context, recom_msg)
except Nca47Exception as e:
self.response.status = e.code
            LOG.error(_LE('Error exception! error info: %s'), e.message)
LOG.exception(e)
return tools.ret_info(e.code, e.message)
except MessagingException as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as exception:
LOG.exception(exception)
self.response.status = 500
message = "the values of the body format error"
return tools.ret_info(self.response.status, message)
return addrobjs
def validat_values(self, values, valid_keys):
"""Non null input parameters"""
        recom_msg = {}
        for key in valid_keys:
            # the key must exist before any value-level validation
            if key not in values.keys():
                raise NonExistParam(param_name=key)
            # check the IP format
            if key == 'ip':
                if not tools._is_valid_ipv4_addr(values[key]):
                    raise ParamValueError(param_name=key)
            recom_msg[key] = values[key]
            if values[key] is None:
                raise ParamNull(param_name=key)
        return recom_msg
|
{
"content_hash": "edc37809fb609df812a7620bfffa4ae8",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 77,
"avg_line_length": 42.02150537634409,
"alnum_prop": 0.5772773797338793,
"repo_name": "willowd878/nca47",
"id": "924317e64075548d525c43951c5d4fc58fe72bbb",
"size": "7816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nca47/api/controllers/v1/firewall/fw_addrobj.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "43669"
}
],
"symlink_target": ""
}
|
import pandas as pd
import numpy as np
file = "/Users/szabolcs/dev/git/DAT210x/Module6/Datasets/parkinsons.data"
X = pd.read_csv(file)
y = X[["status"]].values.reshape(-1, 1)
X = X.drop(["name", "status"], axis=1)
print(X.head())
print("X", X.shape)
print("y", y.shape)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=7)
from sklearn.preprocessing import StandardScaler
#norm = KernelCenterer()
#best_score 0.915254237288
#C 1.7
#gamma 0.006
norm = StandardScaler()
#best_score 0.932203389831
#C 1.55
#gamma 0.097
norm.fit(X_train)
X_train = norm.transform(X_train)
X_test = norm.transform(X_test)
from sklearn.svm import SVC
model = SVC()
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print(score)
best_score = 0
from sklearn.manifold import Isomap
for n_neighbors in range(2, 6):
for n_components in range(4, 7):
        pca = Isomap(n_neighbors=n_neighbors, n_components=n_components)
        pca.fit(X_train)
        # keep the scaled data untouched so every Isomap fit starts from the
        # same features instead of an already-reduced copy of them
        X_train_iso = pca.transform(X_train)
        X_test_iso = pca.transform(X_test)
        for C in np.arange(0.05, 2, 0.05):
            for gamma in np.arange(0.001, 0.1, 0.001):
                model = SVC(C=C, gamma=gamma)
                model.fit(X_train_iso, y_train)
                score = model.score(X_test_iso, y_test)
if score > best_score:
best_score = score
print("best_score", best_score)
print("C", C)
print("gamma", gamma)
print("n_neighbors", n_neighbors)
print("n_components", n_components)
|
{
"content_hash": "abbb4915ee30c8e0d1964d1e9df4f11c",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 88,
"avg_line_length": 26.9,
"alnum_prop": 0.541157727031333,
"repo_name": "szigyi/DAT210x",
"id": "0b305a55fe6f09778aea460d88bf3cf955764c8a",
"size": "1971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Module6/assignment3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "710003"
},
{
"name": "Python",
"bytes": "128432"
}
],
"symlink_target": ""
}
|
"""
pyexcel_io.database.django
~~~~~~~~~~~~~~~~~~~
The lower level handler for django import and export
:copyright: (c) 2014-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
import logging
from pyexcel_io.book import BookReader, BookWriter
from pyexcel_io.sheet import SheetWriter
from pyexcel_io.utils import is_empty_array, swap_empty_string_for_none
import pyexcel_io.constants as constants
from pyexcel_io.database.querysets import QuerysetsReader
from ._common import TableExportAdapter, TableExporter
from ._common import TableImporter, TableImportAdapter
log = logging.getLogger(__name__)
class DjangoModelReader(QuerysetsReader):
"""Read from django model
"""
def __init__(self, model, export_columns=None, **keywords):
self.__model = model
if export_columns:
column_names = export_columns
else:
column_names = sorted(
[field.attname
for field in self.__model._meta.concrete_fields])
QuerysetsReader.__init__(self, self.__model.objects.all(),
column_names,
**keywords)
class DjangoModelWriter(SheetWriter):
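    # Buffers incoming rows as unsaved model instances and persists them with a
    # single bulk_create() on close(), falling back to per-object save() if the
    # bulk insert raises.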
def __init__(self, adapter, batch_size=None):
self.__batch_size = batch_size
self.__model = adapter.model
self.__column_names = adapter.column_names
self.__mapdict = adapter.column_name_mapping_dict
self.__initializer = adapter.row_initializer
self.__objs = []
def write_row(self, array):
if is_empty_array(array):
print(constants.MESSAGE_EMPTY_ARRAY)
else:
new_array = swap_empty_string_for_none(array)
model_to_be_created = new_array
if self.__initializer is not None:
model_to_be_created = self.__initializer(new_array)
if model_to_be_created:
self.__objs.append(self.__model(**dict(
zip(self.__column_names, model_to_be_created)
)))
# else
# skip the row
def close(self):
try:
self.__model.objects.bulk_create(self.__objs,
batch_size=self.__batch_size)
except Exception as e:
log.info(constants.MESSAGE_DB_EXCEPTION)
log.info(e)
for object in self.__objs:
try:
object.save()
except Exception as e2:
log.info(constants.MESSAGE_IGNORE_ROW)
log.info(e2)
log.info(object)
continue
class DjangoModelExportAdapter(TableExportAdapter):
pass
class DjangoModelExporter(TableExporter):
pass
class DjangoBookReader(BookReader):
def open(self, file_name, **keywords):
raise NotImplementedError()
def open_stream(self, file_stream, **keywords):
raise NotImplementedError()
def open_content(self, file_content, **keywords):
self.exporter = file_content
self._load_from_django_models()
def read_sheet(self, native_sheet):
reader = DjangoModelReader(native_sheet.model,
native_sheet.export_columns)
return reader.to_array()
def _load_from_django_models(self):
self._native_book = self.exporter.adapters
class DjangoModelImportAdapter(TableImportAdapter):
pass
class DjangoModelImporter(TableImporter):
pass
class DjangoBookWriter(BookWriter):
def open_content(self, file_content, **keywords):
self.importer = file_content
self._keywords = keywords
def create_sheet(self, sheet_name):
sheet_writer = None
model = self.importer.get(sheet_name)
if model:
sheet_writer = DjangoModelWriter(
model,
batch_size=self._keywords.get('batch_size', None))
return sheet_writer
_registry = {
"file_type": constants.DB_DJANGO,
"reader": DjangoBookReader,
"writer": DjangoBookWriter,
"stream_type": "special",
"library": "built-in"
}
exports = (_registry,)
|
{
"content_hash": "6a93e87891f7ccd717cd1c6046dec386",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 74,
"avg_line_length": 30.142857142857142,
"alnum_prop": 0.5959715639810427,
"repo_name": "fuhrysteve/pyexcel-io",
"id": "df48623ea954384d41c7234e47b972d10fe8bedd",
"size": "4220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyexcel_io/database/django.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "223"
},
{
"name": "Makefile",
"bytes": "145"
},
{
"name": "Python",
"bytes": "146729"
},
{
"name": "Shell",
"bytes": "223"
}
],
"symlink_target": ""
}
|
import os
import re
from xml.etree import ElementTree
import asninja.helpers
class AtmelStudioProject(object):
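    # Parses an Atmel Studio project file (MSBuild-style XML) and exposes the
    # selected configuration's compiler, linker and archiver settings as GCC
    # command-line flags.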
NSMAP = {'msb': 'http://schemas.microsoft.com/developer/msbuild/2003'}
def __init__(self, file_name, output):
self.prj = ElementTree.parse(file_name)
self.config_group = None
self.is_cpp = None
self.is_lib = None
self.output_name = None
self.output_ext = None
self.toolchain_settings = None
self.ref_libs = None
self.detect(output)
def detect(self, output):
if not self.prj:
            return
key = self.prj.find('.//msb:PropertyGroup/msb:SchemaVersion', self.NSMAP)
assert (key is not None) and (key.text == '2.0'), 'Unsupported project schema version'
key = self.prj.find('.//msb:PropertyGroup/msb:Language', self.NSMAP)
self.is_cpp = key.text == 'CPP'
key = self.prj.find('.//msb:PropertyGroup/msb:OutputType', self.NSMAP)
self.is_lib = key.text == 'StaticLibrary'
key = self.prj.find('.//msb:PropertyGroup/msb:OutputFileName', self.NSMAP)
self.output_name = key.text.replace('$(MSBuildProjectName)', output)
key = self.prj.find('.//msb:PropertyGroup/msb:OutputFileExtension', self.NSMAP)
self.output_ext = key.text
self.toolchain_settings = 'ArmGccCpp' if self.is_cpp else 'ArmGcc'
self.ref_libs = []
for node in self.prj.findall('.//msb:ItemGroup/msb:ProjectReference', self.NSMAP):
path, prj_name = os.path.split(node.attrib['Include'])
raw_name, __ = os.path.splitext(prj_name)
self.ref_libs.append(RefLibrary(path.replace('\\', '/'), raw_name))
def output(self):
assert self.output_name is not None
assert self.output_ext
return self.output_name + self.output_ext
def toolchain_id(self):
key = self.prj.find('.//msb:PropertyGroup/msb:ProjectVersion', self.NSMAP)
prj_version = key.text
key = self.prj.find('.//msb:PropertyGroup/msb:ToolchainName', self.NSMAP)
toolchain_name = key.text
key = self.prj.find('.//msb:PropertyGroup/msb:ToolchainFlavour', self.NSMAP)
toolchain_flavour = key.text
return prj_version, toolchain_name, toolchain_flavour
def select_config(self, config_name):
self.config_group = None
for group in self.prj.findall('msb:PropertyGroup', self.NSMAP):
if group.attrib.get('Condition', '__').strip() == "'$(Configuration)' == '{}'".format(config_name):
self.config_group = group
break
return self.config_group is not None
def key_raw(self, name):
assert self.config_group is not None
key_xpath = './/msb:{}/msb:{}'.format(self.toolchain_settings, name)
return self.config_group.find(key_xpath, self.NSMAP)
def key_as_bool(self, name, default=False):
assert self.config_group is not None
key = self.key_raw(name)
return key.text == 'True' if key is not None else default
def key_as_str(self, name, fmt, default=''):
assert self.config_group is not None
key = self.key_raw(name)
return fmt.format(key.text) if key is not None else default
def key_as_strlist(self, name, fmt):
assert self.config_group is not None
s = []
key_xpath = './/msb:{}/msb:{}/msb:ListValues/msb:Value'.format(self.toolchain_settings, name)
for key in self.config_group.findall(key_xpath, self.NSMAP):
s.append(fmt.format(key.text))
return s
def src_files(self):
src_files = []
for node in self.prj.findall('.//msb:ItemGroup/msb:Compile', self.NSMAP):
src_files.append(node.attrib['Include'].replace('\\', '/'))
return src_files
def compiler_flags(self, c_compiler, add_defs, del_defs, add_undefs):
assert self.config_group is not None
assert isinstance(add_defs, list)
assert isinstance(del_defs, list)
assert isinstance(add_undefs, list)
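        # Each '<toolchain>.compiler.*' key below is read from the selected
        # configuration group and mapped onto its GCC equivalent.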
flags = []
prefix = 'armgcc' if c_compiler else 'armgcccpp'
prefix += '.compiler.'
# General
if self.key_as_bool(prefix + 'general.ChangeDefaultCharTypeUnsigned'):
flags.append('-funsigned-char')
if self.key_as_bool(prefix + 'general.ChangeDefaultBitFieldUnsigned'):
flags.append('-funsigned-bitfields')
# Preprocessor
if self.key_as_bool(prefix + 'general.DoNotSearchSystemDirectories'):
flags.append('-nostdinc')
if self.key_as_bool(prefix + 'general.PreprocessOnly'):
flags.append('-E')
# Symbols
inc_defs = asninja.helpers.strip_empty_symbols(add_defs)
inc_defs += self.key_as_strlist(prefix + 'symbols.DefSymbols', '{}')
for del_def in del_defs:
if inc_defs.count(del_def) > 0:
assert inc_defs.count(del_def) == 1
inc_defs.remove(del_def)
flags.extend('-D{}'.format(inc_def) for inc_def in inc_defs)
inc_undefs = asninja.helpers.strip_empty_symbols(add_undefs)
inc_undefs += self.key_as_strlist(prefix + 'preprocessor.UndefSymbols', '{}')
flags.extend('-U{}'.format(inc_undef) for inc_undef in inc_undefs)
# Directories
# if self.key_as_bool(prefix + 'directories.DefaultIncludePath', True):
# flags += []
flags += self.key_as_strlist(prefix + 'directories.IncludePaths', '-I"{}"')
# Optimization
# Optimization Level: -O[0,1,2,3,s]
key = self.key_raw(prefix + 'optimization.level')
if key is not None:
opt_level = re.search('(-O[0|1|2|3|s])', key.text)
if opt_level:
flags.append(opt_level.group(0))
else:
flags.append('-O0')
flags += [self.key_as_str(prefix + 'optimization.OtherFlags', '{}')]
if self.key_as_bool(prefix + 'optimization.PrepareFunctionsForGarbageCollection'):
flags.append('-ffunction-sections')
if self.key_as_bool(prefix + 'optimization.PrepareDataForGarbageCollection'):
flags.append('-fdata-sections')
if self.key_as_bool(prefix + 'optimization.EnableUnsafeMatchOptimizations'):
flags.append('-funsafe-math-optimizations')
if self.key_as_bool(prefix + 'optimization.EnableFastMath'):
flags.append('-ffast-math')
if self.key_as_bool(prefix + 'optimization.GeneratePositionIndependentCode'):
flags.append('-fpic')
if not c_compiler:
if self.key_as_bool(prefix + 'optimization.UseSectionRoDataForReadOnlyData', False):
flags.append('-muse-rodata-section')
if self.key_as_bool(prefix + 'optimization.DisableRTTI', True):
flags.append('-fno-rtti')
if self.key_as_bool(prefix + 'optimization.DisableExceptions', True):
flags.append('-fno-exceptions')
if self.key_as_bool(prefix + 'optimization.EnableLongCalls', True):
flags.append('-mlong-calls')
# Debugging
# Debug Level: None and -g[1,2,3]
key = self.key_raw(prefix + 'optimization.DebugLevel')
if key is not None:
debug_level = re.search('-g[1|2|3]', key.text)
if debug_level:
flags.append(debug_level.group(0))
flags.append(self.key_as_str(prefix + 'optimization.OtherDebuggingFlags', '{}'))
if self.key_as_bool(prefix + 'optimization.GenerateGprofInformation'):
flags.append('-pg')
if self.key_as_bool(prefix + 'optimization.GenerateProfInformation'):
flags.append('-p')
# Warnings
if self.key_as_bool(prefix + 'warnings.AllWarnings'):
flags.append('-Wall')
if self.key_as_bool(prefix + 'warnings.ExtraWarnings'):
flags.append('-Wextra')
if self.key_as_bool(prefix + 'warnings.Undefined'):
flags.append('-Wundef')
if self.key_as_bool(prefix + 'warnings.WarningsAsErrors'):
flags.append('-Werror')
if self.key_as_bool(prefix + 'warnings.CheckSyntaxOnly'):
flags.append('-fsyntax-only')
if self.key_as_bool(prefix + 'warnings.Pedantic'):
flags.append('-pedantic')
if self.key_as_bool(prefix + 'warnings.PedanticWarningsAsErrors'):
flags.append('-pedantic-errors')
if self.key_as_bool(prefix + 'warnings.InhibitAllWarnings'):
flags.append('-w')
# Miscellaneous
flags.append(self.key_as_str(prefix + 'miscellaneous.OtherFlags', '{}'))
if self.key_as_bool(prefix + 'miscellaneous.Verbose'):
flags.append('-v')
if self.key_as_bool(prefix + 'miscellaneous.SupportAnsiPrograms'):
flags.append('-ansi')
return flags
def linker_flags(self, outdir):
assert self.config_group is not None
flags = []
prefix = self.toolchain_settings.lower() + '.linker.'
# General
if self.key_as_bool(prefix + 'general.DoNotUseStandardStartFiles'):
flags.append('-nostartfiles')
if self.key_as_bool(prefix + 'general.DoNotUseDefaultLibraries'):
flags.append('-nodefaultlibs')
if self.key_as_bool(prefix + 'general.NoStartupOrDefaultLibs'):
flags.append('-nostdlib')
if self.key_as_bool(prefix + 'general.OmitAllSymbolInformation'):
flags.append('-s')
if self.key_as_bool(prefix + 'general.NoSharedLibraries'):
flags.append('-static')
if self.key_as_bool(prefix + 'general.GenerateMAPFile', True):
flags.append('-Wl,-Map="' + self.output_name + '.map"')
if self.key_as_bool(prefix + 'general.UseNewlibNano'):
flags.append('--specs=nano.specs')
        # AdditionalSpecs: if needed, read it from './/armgcc.linker.general.AdditionalSpecs'
# Libraries
inc_libs = self.key_as_strlist(prefix + 'libraries.Libraries', '{}')
for ref_lib in self.ref_libs:
inc_libs.append(ref_lib.raw_name)
inc_libs_group = ''
for inc_lib in inc_libs:
inc_libs_group += ' -l' + RefLibrary.extract_name(inc_lib)
flags.append('-Wl,--start-group{} -Wl,--end-group'.format(inc_libs_group))
flags += self.key_as_strlist(prefix + 'libraries.LibrarySearchPaths', '-L"{}"')
for lib in self.ref_libs:
flags.append('-L"../{}/{}"'.format(lib.path, outdir))
# Optimization
if self.key_as_bool(prefix + 'optimization.GarbageCollectUnusedSections'):
flags.append('-Wl,--gc-sections')
if self.key_as_bool(prefix + 'optimization.EnableUnsafeMatchOptimizations'):
flags.append('-funsafe-math-optimizations')
if self.key_as_bool(prefix + 'optimization.EnableFastMath'):
flags.append('-ffast-math')
if self.key_as_bool(prefix + 'optimization.GeneratePositionIndependentCode'):
flags.append('-fpic')
# Memory Settings
# Miscellaneous
flags.append(self.key_as_str(prefix + 'miscellaneous.LinkerFlags', '{}'))
flags += self.key_as_strlist(prefix + 'miscellaneous.OtherOptions', '-Xlinker {}')
flags += self.key_as_strlist(prefix + 'miscellaneous.OtherObjects', '{}')
return flags
def archiver_flags(self):
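        """Collect archiver (ar) flags from the toolchain settings, defaulting to '-r'."""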
assert self.config_group is not None
flags = []
prefix = self.toolchain_settings.lower() + '.archiver.'
flags.append(self.key_as_str(prefix + 'general.ArchiverFlags', '{}', '-r'))
return flags
class RefLibrary(object):
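    """A static library built by a referenced project.
    raw_name is stored without the 'lib' prefix and '.a' extension.
    """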
LIB_PREFIX = 'lib'
LIB_EXT = '.a'
def __init__(self, path, raw_name):
assert raw_name.find(self.LIB_PREFIX) == -1
assert raw_name.find(self.LIB_EXT) == -1
self.path = path
self.raw_name = raw_name
def lib_name(self, with_ext=False):
if with_ext:
return self.LIB_PREFIX + self.raw_name + self.LIB_EXT
else:
return self.LIB_PREFIX + self.raw_name
def full_name(self, config):
return '{}/{}/{}'.format(self.path, config, self.lib_name(True))
@classmethod
def extract_name(cls, lib_name):
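        """Strip the 'lib' prefix and '.a' extension from lib_name, e.g. 'libfoo.a' -> 'foo'."""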
s = lib_name
if cls.LIB_PREFIX in s:
s = s[len(cls.LIB_PREFIX):]
if s.endswith(cls.LIB_EXT):
s = s[:len(s) - len(cls.LIB_EXT)]
return s
|
{
"content_hash": "c4f89ab01f41879649b6df8b436648c6",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 111,
"avg_line_length": 45.55474452554745,
"alnum_prop": 0.6051113603589169,
"repo_name": "alunegov/AtmelStudioToNinja",
"id": "ddc5b761c3cdb7695ac58997fb44010548c9881b",
"size": "12482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asninja/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33632"
}
],
"symlink_target": ""
}
|
from flask import Flask, jsonify
from modules.get_dir_list import path_to_dict
app = Flask(__name__)
@app.route('/flask/treeRoot')
def send_root_dirs():
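    """Return the top level of the /mnt/data directory tree as JSON."""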
nodes = path_to_dict('/mnt/data', 1)
return jsonify(nodes = [nodes])
|
{
"content_hash": "491ba93cf0dbbb357bb31f4fb83ed362",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 45,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.6810344827586207,
"repo_name": "vangalamaheshh/bib",
"id": "b784dc984362f6d4aa88b37366b8fb4f37e14454",
"size": "296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/flask/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "79"
},
{
"name": "HTML",
"bytes": "538"
},
{
"name": "JavaScript",
"bytes": "1972"
},
{
"name": "Python",
"bytes": "925"
},
{
"name": "Shell",
"bytes": "256"
},
{
"name": "TypeScript",
"bytes": "6664"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2014, 掌阅科技 (iReader Technology)
All rights reserved.
Summary: agent.py
Author: zhuangshixiong
Created: 2015-08-26
"""
import urllib
import operator
import json
from difflib import Differ
from tornado.web import authenticated
from peewee import OperationalError
from kazoo.exceptions import NoNodeError
from handler.bases import CommonBaseHandler
from handler.bases import ArgsMap
from lib import route
from lib.excel import ExcelWorkBook
from model.db.zd_qconf_agent import ZdQconfAgent
from model.db.zd_zookeeper import ZdZookeeper
from service import zookeeper as ZookeeperService
from conf import log
@route(r'/config/agent/index', 'View')
class ZdQconfAgentIndexHandler(CommonBaseHandler):
    '''index, view
'''
args_list = [
ArgsMap('pageSize', 'page_size', default=30),
ArgsMap('pageCurrent', 'current_page', default=1),
ArgsMap('orderDirection', 'order_direction', default="asc"),
ArgsMap('orderField', 'order_field', default="id"),
]
@authenticated
def response(self):
'''index
'''
clauses = self.parse_query(ZdQconfAgent)
order = getattr(ZdQconfAgent, self.order_field)
records = ZdQconfAgent.select().order_by(
getattr(order, self.order_direction)()
).where(reduce(operator.and_, clauses))
self.render('config/agent/index.html',
action='/config/agent/index',
total=records.count(),
current_page=self.current_page,
page_size=self.page_size,
records=records.paginate(self.current_page, self.page_size))
@route(r'/config/agent/watch', 'Watch')
class WsAgentWatchHandler(CommonBaseHandler):
    '''watch
'''
args_list = [
ArgsMap('agent_register_prefix', default="/qconf/__qconf_register_hosts")
]
@authenticated
def response(self):
'''watch
'''
clusters = ZdZookeeper.select().where(ZdZookeeper.deleted == "0")
self.render('config/agent/watch.html',
clusters=clusters,
agent_register_prefix=self.agent_register_prefix)
@route(r'/config/agent/checkagents', 'Check agents')
class WsAgentCheckAgentsHandler(CommonBaseHandler):
'''check agents
'''
args_list = [
ArgsMap('cluster_name', required=True),
ArgsMap('agent_register_prefix', default="/qconf/__qconf_register_hosts")
]
@authenticated
def response(self):
        '''check agents
'''
zoo_client = ZookeeperService.get_zoo_client(self.cluster_name)
if not zoo_client:
            return self.ajax_popup(code=300, msg="Failed to connect to zookeeper!")
try:
zk_agents = zoo_client.get_children(self.agent_register_prefix)
except NoNodeError:
            return self.ajax_popup(code=300, msg="Node path does not exist!")
records = ZdQconfAgent.select().where(
(ZdQconfAgent.cluster_name == self.cluster_name) &
(ZdQconfAgent.deleted == '0')
)
mysql_agents = [record.hostname for record in records]
        # Compare the agents recorded in MySQL with those registered in zookeeper
agents_stat = []
for diff_info in Differ().compare(mysql_agents, zk_agents):
agent_name = diff_info[2:]
if diff_info[0] == "+":
                cmp_res = ['N/A', agent_name]
            elif diff_info[0] == "-":
                cmp_res = [agent_name, 'N/A']
else:
cmp_res = [agent_name, agent_name]
agents_stat.append(cmp_res)
return agents_stat
@route(r'/config/agent/search')
class ZdQconfAgentSearchHandler(CommonBaseHandler):
    '''search
'''
args_list = [
ArgsMap('pageSize', 'page_size', default=30),
ArgsMap('pageCurrent', 'current_page', default=1),
ArgsMap('orderDirection', 'order_direction', default="asc"),
ArgsMap('orderField', 'order_field', default="id"),
]
@authenticated
def response(self):
'''search
'''
clauses = self.parse_query(ZdQconfAgent)
order = getattr(ZdQconfAgent, self.order_field)
records = ZdQconfAgent.select().order_by(
getattr(order, self.order_direction)()
).where(reduce(operator.and_, clauses))
self.render('config/agent/datagrid.html',
total=records.count(),
current_page=self.current_page,
page_size=self.page_size,
records=records.paginate(self.current_page, self.page_size))
@route(r'/config/agent/save')
class ZdQconfAgentSaveHandler(CommonBaseHandler):
"""save
"""
args_list = [
ArgsMap('id', default=''),
ArgsMap('ip', default=''),
ArgsMap('hostname', default=''),
ArgsMap('cluster_name', default=''),
ArgsMap('notes', default=''),
ArgsMap('create_user', default=''),
ArgsMap('create_time', default=''),
ArgsMap('update_user', default=''),
ArgsMap('update_time', default=''),
ArgsMap('deleted', default=''),
]
@authenticated
def response(self):
        '''save, add or update a record
'''
if self.id:
            # update an existing record
tb_inst = ZdQconfAgent.one(id=self.id)
else:
            # create a new record
tb_inst = ZdQconfAgent()
if self.id:
tb_inst.id = self.id
if self.ip:
tb_inst.ip = self.ip
if self.hostname:
tb_inst.hostname = self.hostname
if self.cluster_name:
tb_inst.cluster_name = self.cluster_name
if self.notes:
tb_inst.notes = self.notes
if self.create_user:
tb_inst.create_user = self.create_user
if self.create_time:
tb_inst.create_time = self.create_time
if self.update_user:
tb_inst.update_user = self.update_user
if self.update_time:
tb_inst.update_time = self.update_time
if self.deleted:
tb_inst.deleted = self.deleted
tb_inst.save()
return self.ajax_ok(forward="/config/agent/index")
@route(r'/config/agent/add', 'Add')
class ZdQconfAgentAddHandler(CommonBaseHandler):
    '''add
'''
@authenticated
def response(self):
'''add
'''
clusters = ZdZookeeper.select().where(ZdZookeeper.deleted == "0")
return self.render('config/agent/add.html',
action='config/agent/save',
clusters=clusters)
@route(r'/config/agent/edit', 'Edit')
class ZdQconfAgentEditHandler(CommonBaseHandler):
"""edit, 修改
"""
args_list = [
ArgsMap('info_ids', default=''),
]
def response(self):
'''edit
'''
if self.info_ids:
id_li = self.info_ids.split(',')
if len(id_li) != 1:
                return self.ajax_popup(close_current=False, code=300, msg="Please select a single record to edit")
clusters = ZdZookeeper.select().where(ZdZookeeper.deleted == "0")
record = ZdQconfAgent.one(id=id_li[0])
return self.render('config/agent/edit.html',
action='/config/agent/save',
clusters=clusters,
record=record)
else:
            return self.ajax_popup(close_current=False, code=300, msg="Please select a record to edit")
@route(r'/config/agent/delete', 'Delete')
class ZdQconfAgentDeleteHandler(CommonBaseHandler):
"""delete, 删除
"""
args_list = [
ArgsMap('info_ids', default=''),
]
def response(self):
'''delete
'''
if not self.info_ids:
            return self.ajax_popup(close_current=False, code=300, msg="Please select at least one record to delete")
id_list = self.info_ids.split(',')
try:
del_query = ZdQconfAgent.delete().where(ZdQconfAgent.id << id_list)
del_query.execute()
except OperationalError as exc:
log.error("error occurred while delete agents, ids: %s\n%s", id_list, str(exc))
            return self.ajax_popup(close_current=False, code=300, msg="Delete failed!")
return self.ajax_ok(close_current=False)
@route(r'/config/agent/export', 'Export')
class ZdQconfAgentExportHandler(CommonBaseHandler):
"""export,导出数据到excel
"""
args_list = [
ArgsMap('info_ids', default=''),
]
def response(self):
        '''Export the selected records to an Excel file
'''
id_li = self.info_ids.split(',')
sheet_text = ZdQconfAgent.select().where(ZdQconfAgent.id << id_li)
sheet_title = [
{'name': 'ip'},
            {'name': 'hostname'},
            {'name': 'notes'},
]
bind_attr = (
'ip',
'hostname',
'notes',
)
ewb = ExcelWorkBook()
sheet_name = ZdQconfAgent._meta.db_table
ewb.add_sheet(sheet_name)
ewb.add_title(sheet_name, sheet_title)
ewb.add_text(sheet_name, sheet_text, bind=bind_attr)
filename = '{}.xls'.format(sheet_name)
filename = urllib.urlencode({'filename': filename})
self.set_header('Content-Disposition', 'attachment;{}'.format(filename))
self.finish(ewb.get_stream())
|
{
"content_hash": "5d3934550cd9fffa22bd40f4c6483878",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 91,
"avg_line_length": 30.99662162162162,
"alnum_prop": 0.573732970027248,
"repo_name": "ireaderlab/zkdash",
"id": "f1bce9be46165d8e5b440c576874d86bdd02f728",
"size": "9492",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "handler/config/agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "202395"
},
{
"name": "HTML",
"bytes": "78559"
},
{
"name": "JavaScript",
"bytes": "1737634"
},
{
"name": "PHP",
"bytes": "44496"
},
{
"name": "Python",
"bytes": "114405"
},
{
"name": "Shell",
"bytes": "404"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from contextlib import contextmanager
from celery import states
from celery.exceptions import IncompleteStream, TimeoutError
from celery.five import range
from celery.result import (
AsyncResult,
EagerResult,
TaskSetResult,
result_from_tuple,
)
from celery.utils import uuid
from celery.utils.serialization import pickle
from celery.tests.case import AppCase, Mock, depends_on_current_app, patch
def mock_task(name, state, result):
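    """Build a fake task metadata dict with a freshly generated id."""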
return dict(id=uuid(), name=name, state=state, result=result)
def save_result(app, task):
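    """Record the mock task's result in the app backend according to its state."""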
traceback = 'Some traceback'
if task['state'] == states.SUCCESS:
app.backend.mark_as_done(task['id'], task['result'])
elif task['state'] == states.RETRY:
app.backend.mark_as_retry(
task['id'], task['result'], traceback=traceback,
)
else:
app.backend.mark_as_failure(
task['id'], task['result'], traceback=traceback,
)
def make_mock_group(app, size=10):
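    """Create `size` successful mock tasks and return their AsyncResults."""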
tasks = [mock_task('ts%d' % i, states.SUCCESS, i) for i in range(size)]
[save_result(app, task) for task in tasks]
return [app.AsyncResult(task['id']) for task in tasks]
class test_AsyncResult(AppCase):
def setup(self):
self.task1 = mock_task('task1', states.SUCCESS, 'the')
self.task2 = mock_task('task2', states.SUCCESS, 'quick')
self.task3 = mock_task('task3', states.FAILURE, KeyError('brown'))
self.task4 = mock_task('task3', states.RETRY, KeyError('red'))
for task in (self.task1, self.task2, self.task3, self.task4):
save_result(self.app, task)
@self.app.task(shared=False)
def mytask():
pass
self.mytask = mytask
def test_compat_properties(self):
x = self.app.AsyncResult('1')
self.assertEqual(x.task_id, x.id)
x.task_id = '2'
self.assertEqual(x.id, '2')
def test_children(self):
x = self.app.AsyncResult('1')
children = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]
x._cache = {'children': children, 'status': states.SUCCESS}
x.backend = Mock()
self.assertTrue(x.children)
self.assertEqual(len(x.children), 3)
def test_propagates_for_parent(self):
x = self.app.AsyncResult(uuid())
x.backend = Mock(name='backend')
x.backend.get_task_meta.return_value = {}
x.backend.wait_for.return_value = {
'status': states.SUCCESS, 'result': 84,
}
x.parent = EagerResult(uuid(), KeyError('foo'), states.FAILURE)
with self.assertRaises(KeyError):
x.get(propagate=True)
self.assertFalse(x.backend.wait_for.called)
x.parent = EagerResult(uuid(), 42, states.SUCCESS)
self.assertEqual(x.get(propagate=True), 84)
self.assertTrue(x.backend.wait_for.called)
def test_get_children(self):
tid = uuid()
x = self.app.AsyncResult(tid)
child = [self.app.AsyncResult(uuid()).as_tuple()
for i in range(10)]
x._cache = {'children': child}
self.assertTrue(x.children)
self.assertEqual(len(x.children), 10)
x._cache = {'status': states.SUCCESS}
x.backend._cache[tid] = {'result': None}
self.assertIsNone(x.children)
def test_build_graph_get_leaf_collect(self):
x = self.app.AsyncResult('1')
x.backend._cache['1'] = {'status': states.SUCCESS, 'result': None}
c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]
x.iterdeps = Mock()
x.iterdeps.return_value = (
(None, x),
(x, c[0]),
(c[0], c[1]),
(c[1], c[2])
)
x.backend.READY_STATES = states.READY_STATES
self.assertTrue(x.graph)
self.assertIs(x.get_leaf(), 2)
it = x.collect()
self.assertListEqual(list(it), [
(x, None),
(c[0], 0),
(c[1], 1),
(c[2], 2),
])
def test_iterdeps(self):
x = self.app.AsyncResult('1')
c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]
x._cache = {'status': states.SUCCESS, 'result': None, 'children': c}
for child in c:
child.backend = Mock()
child.backend.get_children.return_value = []
it = x.iterdeps()
self.assertListEqual(list(it), [
(None, x),
(x, c[0]),
(x, c[1]),
(x, c[2]),
])
x._cache = None
x.ready = Mock()
x.ready.return_value = False
with self.assertRaises(IncompleteStream):
list(x.iterdeps())
list(x.iterdeps(intermediate=True))
def test_eq_not_implemented(self):
self.assertFalse(self.app.AsyncResult('1') == object())
@depends_on_current_app
def test_reduce(self):
a1 = self.app.AsyncResult('uuid', task_name=self.mytask.name)
restored = pickle.loads(pickle.dumps(a1))
self.assertEqual(restored.id, 'uuid')
self.assertEqual(restored.task_name, self.mytask.name)
a2 = self.app.AsyncResult('uuid')
self.assertEqual(pickle.loads(pickle.dumps(a2)).id, 'uuid')
def test_successful(self):
ok_res = self.app.AsyncResult(self.task1['id'])
nok_res = self.app.AsyncResult(self.task3['id'])
nok_res2 = self.app.AsyncResult(self.task4['id'])
self.assertTrue(ok_res.successful())
self.assertFalse(nok_res.successful())
self.assertFalse(nok_res2.successful())
pending_res = self.app.AsyncResult(uuid())
self.assertFalse(pending_res.successful())
def test_str(self):
ok_res = self.app.AsyncResult(self.task1['id'])
ok2_res = self.app.AsyncResult(self.task2['id'])
nok_res = self.app.AsyncResult(self.task3['id'])
self.assertEqual(str(ok_res), self.task1['id'])
self.assertEqual(str(ok2_res), self.task2['id'])
self.assertEqual(str(nok_res), self.task3['id'])
pending_id = uuid()
pending_res = self.app.AsyncResult(pending_id)
self.assertEqual(str(pending_res), pending_id)
def test_repr(self):
ok_res = self.app.AsyncResult(self.task1['id'])
ok2_res = self.app.AsyncResult(self.task2['id'])
nok_res = self.app.AsyncResult(self.task3['id'])
self.assertEqual(repr(ok_res), '<AsyncResult: %s>' % (
self.task1['id']))
self.assertEqual(repr(ok2_res), '<AsyncResult: %s>' % (
self.task2['id']))
self.assertEqual(repr(nok_res), '<AsyncResult: %s>' % (
self.task3['id']))
pending_id = uuid()
pending_res = self.app.AsyncResult(pending_id)
self.assertEqual(repr(pending_res), '<AsyncResult: %s>' % (
pending_id))
def test_hash(self):
self.assertEqual(hash(self.app.AsyncResult('x0w991')),
hash(self.app.AsyncResult('x0w991')))
self.assertNotEqual(hash(self.app.AsyncResult('x0w991')),
hash(self.app.AsyncResult('x1w991')))
def test_get_traceback(self):
ok_res = self.app.AsyncResult(self.task1['id'])
nok_res = self.app.AsyncResult(self.task3['id'])
nok_res2 = self.app.AsyncResult(self.task4['id'])
self.assertFalse(ok_res.traceback)
self.assertTrue(nok_res.traceback)
self.assertTrue(nok_res2.traceback)
pending_res = self.app.AsyncResult(uuid())
self.assertFalse(pending_res.traceback)
def test_get(self):
ok_res = self.app.AsyncResult(self.task1['id'])
ok2_res = self.app.AsyncResult(self.task2['id'])
nok_res = self.app.AsyncResult(self.task3['id'])
nok2_res = self.app.AsyncResult(self.task4['id'])
self.assertEqual(ok_res.get(), 'the')
self.assertEqual(ok2_res.get(), 'quick')
with self.assertRaises(KeyError):
nok_res.get()
self.assertTrue(nok_res.get(propagate=False))
self.assertIsInstance(nok2_res.result, KeyError)
self.assertEqual(ok_res.info, 'the')
def test_get_timeout(self):
res = self.app.AsyncResult(self.task4['id']) # has RETRY state
with self.assertRaises(TimeoutError):
res.get(timeout=0.001)
pending_res = self.app.AsyncResult(uuid())
with patch('celery.result.time') as _time:
with self.assertRaises(TimeoutError):
pending_res.get(timeout=0.001, interval=0.001)
_time.sleep.assert_called_with(0.001)
def test_get_timeout_longer(self):
res = self.app.AsyncResult(self.task4['id']) # has RETRY state
with patch('celery.result.time') as _time:
with self.assertRaises(TimeoutError):
res.get(timeout=1, interval=1)
_time.sleep.assert_called_with(1)
def test_ready(self):
oks = (self.app.AsyncResult(self.task1['id']),
self.app.AsyncResult(self.task2['id']),
self.app.AsyncResult(self.task3['id']))
self.assertTrue(all(result.ready() for result in oks))
self.assertFalse(self.app.AsyncResult(self.task4['id']).ready())
self.assertFalse(self.app.AsyncResult(uuid()).ready())
class test_ResultSet(AppCase):
def test_resultset_repr(self):
self.assertTrue(repr(self.app.ResultSet(
[self.app.AsyncResult(t) for t in ['1', '2', '3']])))
def test_eq_other(self):
self.assertFalse(self.app.ResultSet([1, 3, 3]) == 1)
self.assertTrue(self.app.ResultSet([1]) == self.app.ResultSet([1]))
def test_get(self):
x = self.app.ResultSet([self.app.AsyncResult(t) for t in [1, 2, 3]])
b = x.results[0].backend = Mock()
b.supports_native_join = False
x.join_native = Mock()
x.join = Mock()
x.get()
self.assertTrue(x.join.called)
b.supports_native_join = True
x.get()
self.assertTrue(x.join_native.called)
def test_get_empty(self):
x = self.app.ResultSet([])
self.assertIsNone(x.supports_native_join)
x.join = Mock(name='join')
x.get()
self.assertTrue(x.join.called)
def test_add(self):
x = self.app.ResultSet([1])
x.add(2)
self.assertEqual(len(x), 2)
x.add(2)
self.assertEqual(len(x), 2)
@contextmanager
def dummy_copy(self):
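        """Patch celery.result.copy to a pass-through so iterate() works on our mocks."""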
with patch('celery.result.copy') as copy:
def passt(arg):
return arg
copy.side_effect = passt
yield
def test_iterate_respects_subpolling_interval(self):
r1 = self.app.AsyncResult(uuid())
r2 = self.app.AsyncResult(uuid())
backend = r1.backend = r2.backend = Mock()
backend.subpolling_interval = 10
ready = r1.ready = r2.ready = Mock()
def se(*args, **kwargs):
ready.side_effect = KeyError()
return False
ready.return_value = False
ready.side_effect = se
x = self.app.ResultSet([r1, r2])
with self.dummy_copy():
with patch('celery.result.time') as _time:
with self.assertPendingDeprecation():
with self.assertRaises(KeyError):
list(x.iterate())
_time.sleep.assert_called_with(10)
backend.subpolling_interval = 0
with patch('celery.result.time') as _time:
with self.assertPendingDeprecation():
with self.assertRaises(KeyError):
ready.return_value = False
ready.side_effect = se
list(x.iterate())
self.assertFalse(_time.sleep.called)
def test_times_out(self):
        r1 = self.app.AsyncResult(uuid())
r1.ready = Mock()
r1.ready.return_value = False
x = self.app.ResultSet([r1])
with self.dummy_copy():
with patch('celery.result.time'):
with self.assertPendingDeprecation():
with self.assertRaises(TimeoutError):
list(x.iterate(timeout=1))
def test_add_discard(self):
x = self.app.ResultSet([])
x.add(self.app.AsyncResult('1'))
self.assertIn(self.app.AsyncResult('1'), x.results)
x.discard(self.app.AsyncResult('1'))
x.discard(self.app.AsyncResult('1'))
x.discard('1')
self.assertNotIn(self.app.AsyncResult('1'), x.results)
x.update([self.app.AsyncResult('2')])
def test_clear(self):
x = self.app.ResultSet([])
r = x.results
x.clear()
self.assertIs(x.results, r)
class MockAsyncResultFailure(AsyncResult):
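    """AsyncResult stub that is always in the FAILURE state with a KeyError result."""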
@property
def result(self):
return KeyError('baz')
@property
def state(self):
return states.FAILURE
def get(self, propagate=True, **kwargs):
if propagate:
raise self.result
return self.result
class MockAsyncResultSuccess(AsyncResult):
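    """AsyncResult stub that is always SUCCESSful and records whether forget() was called."""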
forgotten = False
def forget(self):
self.forgotten = True
@property
def result(self):
return 42
@property
def state(self):
return states.SUCCESS
def get(self, **kwargs):
return self.result
class SimpleBackend(object):
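    """Stub backend whose get_many() yields a SUCCESS result for each configured id."""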
ids = []
def __init__(self, ids=[]):
self.ids = ids
def get_many(self, *args, **kwargs):
return ((id, {'result': i, 'status': states.SUCCESS})
for i, id in enumerate(self.ids))
class test_TaskSetResult(AppCase):
def setup(self):
self.size = 10
self.ts = TaskSetResult(uuid(), make_mock_group(self.app, self.size))
def test_total(self):
self.assertEqual(self.ts.total, self.size)
def test_compat_properties(self):
self.assertEqual(self.ts.taskset_id, self.ts.id)
self.ts.taskset_id = 'foo'
self.assertEqual(self.ts.taskset_id, 'foo')
def test_compat_subtasks_kwarg(self):
x = TaskSetResult(uuid(), subtasks=[1, 2, 3])
self.assertEqual(x.results, [1, 2, 3])
def test_itersubtasks(self):
it = self.ts.itersubtasks()
for i, t in enumerate(it):
self.assertEqual(t.get(), i)
class test_GroupResult(AppCase):
def setup(self):
self.size = 10
self.ts = self.app.GroupResult(
uuid(), make_mock_group(self.app, self.size),
)
@depends_on_current_app
def test_is_pickleable(self):
ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
self.assertEqual(pickle.loads(pickle.dumps(ts)), ts)
ts2 = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
self.assertEqual(pickle.loads(pickle.dumps(ts2)), ts2)
def test_len(self):
self.assertEqual(len(self.ts), self.size)
def test_eq_other(self):
self.assertFalse(self.ts == 1)
@depends_on_current_app
def test_reduce(self):
self.assertTrue(pickle.loads(pickle.dumps(self.ts)))
def test_iterate_raises(self):
ar = MockAsyncResultFailure(uuid(), app=self.app)
ts = self.app.GroupResult(uuid(), [ar])
with self.assertPendingDeprecation():
it = ts.iterate()
with self.assertRaises(KeyError):
next(it)
def test_forget(self):
subs = [MockAsyncResultSuccess(uuid(), app=self.app),
MockAsyncResultSuccess(uuid(), app=self.app)]
ts = self.app.GroupResult(uuid(), subs)
ts.forget()
for sub in subs:
self.assertTrue(sub.forgotten)
def test_getitem(self):
subs = [MockAsyncResultSuccess(uuid(), app=self.app),
MockAsyncResultSuccess(uuid(), app=self.app)]
ts = self.app.GroupResult(uuid(), subs)
self.assertIs(ts[0], subs[0])
def test_save_restore(self):
subs = [MockAsyncResultSuccess(uuid(), app=self.app),
MockAsyncResultSuccess(uuid(), app=self.app)]
ts = self.app.GroupResult(uuid(), subs)
ts.save()
with self.assertRaises(AttributeError):
ts.save(backend=object())
self.assertEqual(self.app.GroupResult.restore(ts.id).subtasks,
ts.subtasks)
ts.delete()
self.assertIsNone(self.app.GroupResult.restore(ts.id))
with self.assertRaises(AttributeError):
self.app.GroupResult.restore(ts.id, backend=object())
def test_join_native(self):
backend = SimpleBackend()
subtasks = [self.app.AsyncResult(uuid(), backend=backend)
for i in range(10)]
ts = self.app.GroupResult(uuid(), subtasks)
ts.app.backend = backend
backend.ids = [subtask.id for subtask in subtasks]
res = ts.join_native()
self.assertEqual(res, list(range(10)))
def test_join_native_raises(self):
ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
ts.iter_native = Mock()
ts.iter_native.return_value = iter([
(uuid(), {'status': states.FAILURE, 'result': KeyError()})
])
with self.assertRaises(KeyError):
ts.join_native(propagate=True)
def test_failed_join_report(self):
res = Mock()
ts = self.app.GroupResult(uuid(), [res])
res.state = states.FAILURE
res.backend.is_cached.return_value = True
self.assertIs(next(ts._failed_join_report()), res)
res.backend.is_cached.return_value = False
with self.assertRaises(StopIteration):
next(ts._failed_join_report())
def test_repr(self):
self.assertTrue(repr(
self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
))
def test_children_is_results(self):
ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
self.assertIs(ts.children, ts.results)
def test_iter_native(self):
backend = SimpleBackend()
subtasks = [self.app.AsyncResult(uuid(), backend=backend)
for i in range(10)]
ts = self.app.GroupResult(uuid(), subtasks)
ts.app.backend = backend
backend.ids = [subtask.id for subtask in subtasks]
self.assertEqual(len(list(ts.iter_native())), 10)
def test_iterate_yields(self):
ar = MockAsyncResultSuccess(uuid(), app=self.app)
ar2 = MockAsyncResultSuccess(uuid(), app=self.app)
ts = self.app.GroupResult(uuid(), [ar, ar2])
with self.assertPendingDeprecation():
it = ts.iterate()
self.assertEqual(next(it), 42)
self.assertEqual(next(it), 42)
def test_iterate_eager(self):
ar1 = EagerResult(uuid(), 42, states.SUCCESS)
ar2 = EagerResult(uuid(), 42, states.SUCCESS)
ts = self.app.GroupResult(uuid(), [ar1, ar2])
with self.assertPendingDeprecation():
it = ts.iterate()
self.assertEqual(next(it), 42)
self.assertEqual(next(it), 42)
def test_join_timeout(self):
ar = MockAsyncResultSuccess(uuid(), app=self.app)
ar2 = MockAsyncResultSuccess(uuid(), app=self.app)
ar3 = self.app.AsyncResult(uuid())
ts = self.app.GroupResult(uuid(), [ar, ar2, ar3])
with self.assertRaises(TimeoutError):
ts.join(timeout=0.0000001)
ar4 = self.app.AsyncResult(uuid())
ar4.get = Mock()
ts2 = self.app.GroupResult(uuid(), [ar4])
self.assertTrue(ts2.join(timeout=0.1))
def test_iter_native_when_empty_group(self):
ts = self.app.GroupResult(uuid(), [])
self.assertListEqual(list(ts.iter_native()), [])
def test_iterate_simple(self):
with self.assertPendingDeprecation():
it = self.ts.iterate()
results = sorted(list(it))
self.assertListEqual(results, list(range(self.size)))
def test___iter__(self):
self.assertListEqual(list(iter(self.ts)), self.ts.results)
def test_join(self):
joined = self.ts.join()
self.assertListEqual(joined, list(range(self.size)))
def test_successful(self):
self.assertTrue(self.ts.successful())
def test_failed(self):
self.assertFalse(self.ts.failed())
def test_waiting(self):
self.assertFalse(self.ts.waiting())
def test_ready(self):
self.assertTrue(self.ts.ready())
def test_completed_count(self):
self.assertEqual(self.ts.completed_count(), len(self.ts))
class test_pending_AsyncResult(AppCase):
def setup(self):
self.task = self.app.AsyncResult(uuid())
def test_result(self):
self.assertIsNone(self.task.result)
class test_failed_AsyncResult(test_GroupResult):
def setup(self):
self.size = 11
subtasks = make_mock_group(self.app, 10)
failed = mock_task('ts11', states.FAILURE, KeyError('Baz'))
save_result(self.app, failed)
failed_res = self.app.AsyncResult(failed['id'])
self.ts = self.app.GroupResult(uuid(), subtasks + [failed_res])
def test_completed_count(self):
self.assertEqual(self.ts.completed_count(), len(self.ts) - 1)
def test_iterate_simple(self):
with self.assertPendingDeprecation():
it = self.ts.iterate()
def consume():
return list(it)
with self.assertRaises(KeyError):
consume()
def test_join(self):
with self.assertRaises(KeyError):
self.ts.join()
def test_successful(self):
self.assertFalse(self.ts.successful())
def test_failed(self):
self.assertTrue(self.ts.failed())
class test_pending_Group(AppCase):
def setup(self):
self.ts = self.app.GroupResult(
uuid(), [self.app.AsyncResult(uuid()),
self.app.AsyncResult(uuid())])
def test_completed_count(self):
self.assertEqual(self.ts.completed_count(), 0)
def test_ready(self):
self.assertFalse(self.ts.ready())
def test_waiting(self):
self.assertTrue(self.ts.waiting())
def x_join(self):
with self.assertRaises(TimeoutError):
self.ts.join(timeout=0.001)
def x_join_longer(self):
with self.assertRaises(TimeoutError):
self.ts.join(timeout=1)
class test_EagerResult(AppCase):
def setup(self):
@self.app.task(shared=False)
def raising(x, y):
raise KeyError(x, y)
self.raising = raising
def test_wait_raises(self):
res = self.raising.apply(args=[3, 3])
with self.assertRaises(KeyError):
res.wait()
self.assertTrue(res.wait(propagate=False))
def test_wait(self):
res = EagerResult('x', 'x', states.RETRY)
res.wait()
self.assertEqual(res.state, states.RETRY)
self.assertEqual(res.status, states.RETRY)
def test_forget(self):
res = EagerResult('x', 'x', states.RETRY)
res.forget()
def test_revoke(self):
res = self.raising.apply(args=[3, 3])
self.assertFalse(res.revoke())
class test_tuples(AppCase):
def test_AsyncResult(self):
x = self.app.AsyncResult(uuid())
self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app))
self.assertEqual(x, result_from_tuple(x, self.app))
def test_with_parent(self):
x = self.app.AsyncResult(uuid())
x.parent = self.app.AsyncResult(uuid())
y = result_from_tuple(x.as_tuple(), self.app)
self.assertEqual(y, x)
self.assertEqual(y.parent, x.parent)
self.assertIsInstance(y.parent, AsyncResult)
def test_compat(self):
uid = uuid()
x = result_from_tuple([uid, []], app=self.app)
self.assertEqual(x.id, uid)
def test_GroupResult(self):
x = self.app.GroupResult(
uuid(), [self.app.AsyncResult(uuid()) for _ in range(10)],
)
self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app))
self.assertEqual(x, result_from_tuple(x, self.app))
|
{
"content_hash": "3171e5d47f5cebe6b9630ba5d49316b6",
"timestamp": "",
"source": "github",
"line_count": 731,
"max_line_length": 77,
"avg_line_length": 33.01641586867305,
"alnum_prop": 0.5894344313238036,
"repo_name": "bdh1011/wau",
"id": "50a9e234eeff8d3842ce7f747b88bb43889dfbd1",
"size": "24135",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/celery/tests/tasks/test_result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1176"
},
{
"name": "C",
"bytes": "5022853"
},
{
"name": "C++",
"bytes": "43676"
},
{
"name": "CSS",
"bytes": "10359"
},
{
"name": "D",
"bytes": "1841"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Groff",
"bytes": "7236"
},
{
"name": "HTML",
"bytes": "1709320"
},
{
"name": "JavaScript",
"bytes": "1200059"
},
{
"name": "Jupyter Notebook",
"bytes": "310219"
},
{
"name": "Lua",
"bytes": "11887"
},
{
"name": "Makefile",
"bytes": "112163"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Perl",
"bytes": "171375"
},
{
"name": "Python",
"bytes": "49407229"
},
{
"name": "Ruby",
"bytes": "58403"
},
{
"name": "Shell",
"bytes": "47672"
},
{
"name": "Smarty",
"bytes": "22599"
},
{
"name": "Tcl",
"bytes": "426334"
},
{
"name": "XSLT",
"bytes": "153073"
}
],
"symlink_target": ""
}
|
import magento
import json
from .api import Core
from trytond.model import ModelView, fields
from trytond.pool import PoolMeta, Pool
from trytond.transaction import Transaction
from trytond.pyson import PYSONEncoder, Eval
from trytond.wizard import (
Wizard, StateView, Button, StateAction, StateTransition
)
__all__ = [
'ExportMagentoShipmentStatusStart',
'ExportMagentoShipmentStatus', 'ImportMagentoCarriersStart',
'ImportMagentoCarriers', 'ConfigureMagento',
'TestMagentoConnectionStart', 'ImportWebsitesStart',
'ImportStoresStart', 'FailureStart', 'SuccessStart',
]
__metaclass__ = PoolMeta
class ImportMagentoCarriersStart(ModelView):
"Import Carriers Start"
__name__ = 'magento.wizard_import_carriers.start'
message = fields.Text("Message", readonly=True)
class ImportMagentoCarriers(Wizard):
"""
Wizard to import carriers / shipping methods for channel
"""
__name__ = 'magento.wizard_import_carriers'
start = StateView(
'magento.wizard_import_carriers.start',
'magento.wizard_import_magento_carriers_start_view_form',
[
Button('Ok', 'end', 'tryton-ok'),
]
)
def default_start(self, data):
"""
Import carriers and show the user appropriate message
:param data: Wizard data
"""
Channel = Pool().get('sale.channel')
channel = Channel(Transaction().context.get('active_id'))
channel.validate_magento_channel()
return {
'message':
"This wizard has imported all the carriers / " +
"shipping methods for this magento channel. You should now " +
"configure the imported carriers / shipping methods to " +
"match the shipment carriers in Tryton to allow seamless " +
"synchronisation of tracking information."
}
class ExportMagentoShipmentStatusStart(ModelView):
"Export Shipment Status View"
__name__ = 'magento.wizard_export_shipment_status.start'
message = fields.Text("Message", readonly=True)
class ExportMagentoShipmentStatus(Wizard):
"""
Export Shipment Status Wizard
Exports shipment status for sale orders related to current store view
"""
__name__ = 'magento.wizard_export_shipment_status'
start = StateView(
'magento.wizard_export_shipment_status.start',
'magento.wizard_export_magento_shipment_status_view_start_form',
[
Button('Cancel', 'end', 'tryton-cancel'),
Button('Continue', 'export_', 'tryton-ok', default=True),
]
)
export_ = StateAction('sale.act_sale_form')
def default_start(self, data):
"""
Sets default data for wizard
:param data: Wizard data
"""
Channel = Pool().get('sale.channel')
channel = Channel(Transaction().context.get('active_id'))
channel.validate_magento_channel()
return {
'message':
"This wizard will export shipment status for all the " +
"shipments related to this store view. To export tracking " +
"information also for these shipments please check the " +
"checkbox for Export Tracking Information on Store View."
}
def do_export_(self, action):
"""Handles the transition"""
Channel = Pool().get('sale.channel')
channel = Channel(Transaction().context.get('active_id'))
channel.validate_magento_channel()
sales = channel.export_shipment_status_to_magento()
action['pyson_domain'] = PYSONEncoder().encode(
[('id', 'in', map(int, sales))]
)
return action, {}
def transition_export_(self):
return 'end'
class ConfigureMagento(Wizard):
"""
Wizard To Configure Magento
"""
__name__ = 'magento.wizard_configure_magento'
start = StateView(
'magento.wizard_test_connection.start',
'magento.wizard_test_magento_connection_view_form',
[
Button('Cancel', 'end', 'tryton-cancel'),
Button('Next', 'website', 'tryton-go-next', 'True'),
]
)
website = StateTransition()
import_website = StateView(
'magento.wizard_import_websites.start',
'magento.wizard_import_websites_view_form',
[
Button('Next', 'store', 'tryton-go-next', 'True'),
]
)
store = StateTransition()
import_store = StateView(
'magento.wizard_import_stores.start',
'magento.wizard_import_stores_view_form',
[
Button('Next', 'success', 'tryton-go-next', 'True'),
]
)
success = StateView(
'magento.wizard_configuration_success.start',
'magento.wizard_configuration_success_view_form',
[
Button('Ok', 'end', 'tryton-ok')
]
)
failure = StateView(
'magento.wizard_configuration_failure.start',
'magento.wizard_configuration_failure_view_form',
[
Button('Ok', 'end', 'tryton-ok')
]
)
def default_start(self, data):
"""
Test the connection for current magento channel
"""
Channel = Pool().get('sale.channel')
magento_channel = Channel(Transaction().context.get('active_id'))
magento_channel.validate_magento_channel()
# Test Connection
magento_channel.test_magento_connection()
return {
'channel': magento_channel.id
}
def transition_website(self):
"""
Import websites for current magento channel
"""
magento_channel = self.start.channel
self.import_website.__class__.magento_websites.selection = \
self.get_websites()
if not (
magento_channel.magento_website_id and
magento_channel.magento_store_id
):
return 'import_website'
if not self.validate_websites():
return 'failure'
return 'end'
def transition_store(self):
"""
Initialize the values of website in sale channel
"""
self.import_store.__class__.magento_stores.selection = \
self.get_stores()
return 'import_store'
def default_success(self, data):
"""
Initialize the values of store in sale channel
"""
channel = self.start.channel
imported_store = self.import_store.magento_stores
imported_website = self.import_website.magento_websites
magento_website = json.loads(imported_website)
channel.magento_website_id = magento_website['id']
channel.magento_website_name = magento_website['name']
channel.magento_website_code = magento_website['code']
magento_store = json.loads(imported_store)
channel.magento_store_id = magento_store['store_id']
channel.magento_store_name = magento_store['name']
channel.save()
return {}
def get_websites(self):
"""
Returns the list of websites
"""
magento_channel = self.start.channel
with Core(
magento_channel.magento_url, magento_channel.magento_api_user,
magento_channel.magento_api_key
) as core_api:
websites = core_api.websites()
selection = []
for website in websites:
# XXX: An UGLY way to map json to selection, fix me
website_data = {
'code': website['code'],
'id': website['website_id'],
'name': website['name']
}
website_data = json.dumps(website_data)
selection.append((website_data, website['name']))
return selection
def get_stores(self):
"""
Return list of all stores
"""
magento_channel = self.start.channel
selected_website = json.loads(self.import_website.magento_websites)
with Core(
magento_channel.magento_url, magento_channel.magento_api_user,
magento_channel.magento_api_key
) as core_api:
stores = core_api.stores(selected_website['id'])
all_stores = []
for store in stores:
            # Build a dict with just the required store fields and serialise it
            # to a JSON string for use as the selection value
store_data = {
'store_id': store['default_store_id'],
'name': store['name']
}
store_data = json.dumps(store_data)
all_stores.append((store_data, store['name']))
return all_stores
def validate_websites(self):
"""
Validate the website of magento channel
"""
magento_channel = self.start.channel
current_website_configurations = {
'code': magento_channel.magento_website_code,
'id': str(magento_channel.magento_website_id),
'name': magento_channel.magento_website_name
}
current_website = (
json.dumps(current_website_configurations),
magento_channel.magento_website_name
)
if current_website not in self.get_websites():
return False
return True
class TestMagentoConnectionStart(ModelView):
"Test Connection"
__name__ = 'magento.wizard_test_connection.start'
channel = fields.Many2One(
'sale.channel', 'Sale Channel', required=True, readonly=True
)
class ImportWebsitesStart(ModelView):
"""
Import Websites Start View
"""
__name__ = 'magento.wizard_import_websites.start'
magento_websites = fields.Selection([], 'Select Website', required=True)
class ImportStoresStart(ModelView):
"""
Import stores from websites
"""
__name__ = 'magento.wizard_import_stores.start'
magento_stores = fields.Selection([], 'Select Store', required=True)
class FailureStart(ModelView):
"""
Failure wizard
"""
__name__ = 'magento.wizard_configuration_failure.start'
class SuccessStart(ModelView):
"""
Get Done
"""
__name__ = 'magento.wizard_configuration_success.start'
class UpdateMagentoCatalogStart(ModelView):
'Update Catalog View'
__name__ = 'magento.update_catalog.start'
class UpdateMagentoCatalog(Wizard):
'''
Update Catalog
This is a wizard to update already imported products
'''
__name__ = 'magento.update_catalog'
start = StateView(
'magento.update_catalog.start',
'magento.magento_update_catalog_start_view_form', [
Button('Cancel', 'end', 'tryton-cancel'),
Button('Continue', 'update_', 'tryton-ok', default=True),
]
)
update_ = StateAction('product.act_template_form')
def do_update_(self, action):
"""Handles the transition"""
Channel = Pool().get('sale.channel')
channel = Channel(Transaction().context.get('active_id'))
channel.validate_magento_channel()
product_template_ids = self.update_products(channel)
action['pyson_domain'] = PYSONEncoder().encode(
[('id', 'in', product_template_ids)])
return action, {}
    def transition_update_(self):
return 'end'
def update_products(self, channel):
"""
Updates products for current magento_channel
:param channel: Browse record of channel
:return: List of product IDs
"""
ChannelListing = Pool().get('product.product.channel_listing')
products = []
channel_listings = ChannelListing.search([
            ('channel', '=', channel.id),
('state', '=', 'active'),
])
with Transaction().set_context({'current_channel': channel.id}):
for listing in channel_listings:
products.append(
listing.product.update_from_magento()
)
return map(int, products)
class ExportDataWizardConfigure(ModelView):
"Export Data Start View"
__name__ = 'sale.channel.export_data.configure'
category = fields.Many2One(
'product.category', 'Magento Category', states={
'required': Eval('channel_source') == 'magento',
'invisible': Eval('channel_source') != 'magento',
}, depends=['channel_source'], domain=[('magento_ids', 'not in', [])],
)
attribute_set = fields.Selection(
[], 'Attribute Set', states={
'required': Eval('channel_source') == 'magento',
'invisible': Eval('channel_source') != 'magento',
}, depends=['channel_source'],
)
channel_source = fields.Char("Channel Source")
@classmethod
def get_attribute_sets(cls):
"""Get the list of attribute sets from magento for the current channel
:return: Tuple of attribute sets where each tuple consists of (ID,Name)
"""
Channel = Pool().get('sale.channel')
if not Transaction().context.get('active_id'):
return []
channel = Channel(Transaction().context['active_id'])
channel.validate_magento_channel()
with magento.ProductAttributeSet(
channel.magento_url, channel.magento_api_user,
channel.magento_api_key
) as attribute_set_api:
attribute_sets = attribute_set_api.list()
return [(
attribute_set['set_id'], attribute_set['name']
) for attribute_set in attribute_sets]
@classmethod
def fields_view_get(cls, view_id=None, view_type='form'):
"""This method is overridden to populate the selection field for
attribute_set with the attribute sets from the current channel's
counterpart on magento.
        This overriding has to be done because `active_id` is not available
        if :meth:`get_attribute_sets` is called directly from the field.
"""
rv = super(
ExportDataWizardConfigure, cls
).fields_view_get(view_id, view_type)
rv['fields']['attribute_set']['selection'] = cls.get_attribute_sets()
return rv
class ExportDataWizard:
"Wizard to export data to external channel"
__name__ = 'sale.channel.export_data'
configure = StateView(
'sale.channel.export_data.configure',
'magento.export_data_configure_view_form',
[
Button('Cancel', 'end', 'tryton-cancel'),
Button('Continue', 'next', 'tryton-go-next', default=True),
]
)
def default_configure(self, data):
Channel = Pool().get('sale.channel')
channel = Channel(Transaction().context.get('active_id'))
return {
'channel_source': channel.source
}
def transition_next(self):
Channel = Pool().get('sale.channel')
channel = Channel(Transaction().context.get('active_id'))
if channel.source == 'magento':
return 'configure'
return super(ExportDataWizard, self).transition_next()
def transition_export_(self):
"""
Export the products for the selected category on this channel
"""
Channel = Pool().get('sale.channel')
channel = Channel(Transaction().context['active_id'])
if channel.source != 'magento':
return super(ExportDataWizard, self).transition_export_()
with Transaction().set_context({
'current_channel': channel.id,
'magento_attribute_set': self.start.attribute_set,
'category': self.start.category,
}):
return super(ExportDataWizard, self).transition_export_()
|
{
"content_hash": "1f5524ed8e5e29463b62f59b6bf347fb",
"timestamp": "",
"source": "github",
"line_count": 522,
"max_line_length": 79,
"avg_line_length": 30.011494252873565,
"alnum_prop": 0.597408400357462,
"repo_name": "prakashpp/trytond-magento",
"id": "2e0c7e99cffceb0c3935644b880c9fa73e40c776",
"size": "15690",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "wizard.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "221344"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hqadmin', '0003_auto_20160715_1543'),
]
operations = [
migrations.AlterField(
model_name='vcmmigration',
name='migrated',
field=models.DateTimeField(null=True),
preserve_default=True,
),
]
|
{
"content_hash": "d29b2adbf4ee8dbcd809e201f063b1db",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 50,
"avg_line_length": 22.352941176470587,
"alnum_prop": 0.5789473684210527,
"repo_name": "dimagi/commcare-hq",
"id": "9d387d8479be914a1899dc34af4a39c59f7b883e",
"size": "380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/hqadmin/migrations/0004_auto_20160715_1547.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
from warnings import catch_warnings
import re
import operator
import pytest
from numpy.random import randn
import numpy as np
from pandas.core.api import DataFrame, Panel
from pandas.core.computation import expressions as expr
from pandas import compat, _np_version_under1p11, _np_version_under1p13
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assert_panel_equal)
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
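# Module-level fixtures: float, integer and mixed-dtype frames (and Panels built
# from them) shared by the tests below.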
_frame = DataFrame(randn(10000, 4), columns=list('ABCD'), dtype='float64')
_frame2 = DataFrame(randn(100, 4), columns=list('ABCD'), dtype='float64')
_mixed = DataFrame({'A': _frame['A'].copy(),
'B': _frame['B'].astype('float32'),
'C': _frame['C'].astype('int64'),
'D': _frame['D'].astype('int32')})
_mixed2 = DataFrame({'A': _frame2['A'].copy(),
'B': _frame2['B'].astype('float32'),
'C': _frame2['C'].astype('int64'),
'D': _frame2['D'].astype('int32')})
_integer = DataFrame(
np.random.randint(1, 100,
size=(10001, 4)),
columns=list('ABCD'), dtype='int64')
_integer2 = DataFrame(np.random.randint(1, 100, size=(101, 4)),
columns=list('ABCD'), dtype='int64')
with catch_warnings(record=True):
_frame_panel = Panel(dict(ItemA=_frame.copy(),
ItemB=(_frame.copy() + 3),
ItemC=_frame.copy(),
ItemD=_frame.copy()))
_frame2_panel = Panel(dict(ItemA=_frame2.copy(),
ItemB=(_frame2.copy() + 3),
ItemC=_frame2.copy(),
ItemD=_frame2.copy()))
_integer_panel = Panel(dict(ItemA=_integer,
ItemB=(_integer + 34).astype('int64')))
_integer2_panel = Panel(dict(ItemA=_integer2,
ItemB=(_integer2 + 34).astype('int64')))
_mixed_panel = Panel(dict(ItemA=_mixed, ItemB=(_mixed + 3)))
_mixed2_panel = Panel(dict(ItemA=_mixed2, ItemB=(_mixed2 + 3)))
@pytest.mark.skipif(not expr._USE_NUMEXPR, reason='not using numexpr')
class TestExpressions(object):
def setup_method(self, method):
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.mixed = _mixed.copy()
self.mixed2 = _mixed2.copy()
self.integer = _integer.copy()
self._MIN_ELEMENTS = expr._MIN_ELEMENTS
def teardown_method(self, method):
expr._MIN_ELEMENTS = self._MIN_ELEMENTS
def run_arithmetic(self, df, other, assert_func, check_dtype=False,
test_flex=True):
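        """Run each arithmetic op with numexpr disabled and then enabled and
        assert that both code paths produce the same result."""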
expr._MIN_ELEMENTS = 0
operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv']
if not compat.PY3:
operations.append('div')
for arith in operations:
operator_name = arith
if arith == 'div':
operator_name = 'truediv'
if test_flex:
op = lambda x, y: getattr(df, arith)(y)
op.__name__ = arith
else:
op = getattr(operator, operator_name)
expr.set_use_numexpr(False)
expected = op(df, other)
expr.set_use_numexpr(True)
result = op(df, other)
try:
if check_dtype:
if arith == 'truediv':
assert expected.dtype.kind == 'f'
assert_func(expected, result)
except Exception:
pprint_thing("Failed test with operator %r" % op.__name__)
raise
def test_integer_arithmetic(self):
self.run_arithmetic(self.integer, self.integer,
assert_frame_equal)
self.run_arithmetic(self.integer.iloc[:, 0],
self.integer.iloc[:, 0], assert_series_equal,
check_dtype=True)
def run_binary(self, df, other, assert_func, test_flex=False,
numexpr_ops=set(['gt', 'lt', 'ge', 'le', 'eq', 'ne'])):
"""
tests solely that the result is the same whether or not numexpr is
enabled. Need to test whether the function does the correct thing
elsewhere.
"""
expr._MIN_ELEMENTS = 0
expr.set_test_mode(True)
operations = ['gt', 'lt', 'ge', 'le', 'eq', 'ne']
for arith in operations:
if test_flex:
op = lambda x, y: getattr(df, arith)(y)
op.__name__ = arith
else:
op = getattr(operator, arith)
expr.set_use_numexpr(False)
expected = op(df, other)
expr.set_use_numexpr(True)
expr.get_test_result()
result = op(df, other)
used_numexpr = expr.get_test_result()
try:
if arith in numexpr_ops:
assert used_numexpr, "Did not use numexpr as expected."
else:
assert not used_numexpr, "Used numexpr unexpectedly."
assert_func(expected, result)
except Exception:
pprint_thing("Failed test with operation %r" % arith)
pprint_thing("test_flex was %r" % test_flex)
raise
def run_frame(self, df, other, binary_comp=None, run_binary=True,
**kwargs):
self.run_arithmetic(df, other, assert_frame_equal,
test_flex=False, **kwargs)
self.run_arithmetic(df, other, assert_frame_equal, test_flex=True,
**kwargs)
if run_binary:
if binary_comp is None:
expr.set_use_numexpr(False)
binary_comp = other + 1
expr.set_use_numexpr(True)
self.run_binary(df, binary_comp, assert_frame_equal,
test_flex=False, **kwargs)
self.run_binary(df, binary_comp, assert_frame_equal,
test_flex=True, **kwargs)
def run_series(self, ser, other, binary_comp=None, **kwargs):
self.run_arithmetic(ser, other, assert_series_equal,
test_flex=False, **kwargs)
self.run_arithmetic(ser, other, assert_almost_equal,
test_flex=True, **kwargs)
        # Series doesn't use numexpr for comparisons; it uses vec_compare instead...
# if binary_comp is None:
# binary_comp = other + 1
# self.run_binary(ser, binary_comp, assert_frame_equal,
# test_flex=False, **kwargs)
# self.run_binary(ser, binary_comp, assert_frame_equal,
# test_flex=True, **kwargs)
def run_panel(self, panel, other, binary_comp=None, run_binary=True,
assert_func=assert_panel_equal, **kwargs):
self.run_arithmetic(panel, other, assert_func, test_flex=False,
**kwargs)
self.run_arithmetic(panel, other, assert_func, test_flex=True,
**kwargs)
if run_binary:
if binary_comp is None:
binary_comp = other + 1
self.run_binary(panel, binary_comp, assert_func,
test_flex=False, **kwargs)
self.run_binary(panel, binary_comp, assert_func,
test_flex=True, **kwargs)
def test_integer_arithmetic_frame(self):
self.run_frame(self.integer, self.integer)
def test_integer_arithmetic_series(self):
self.run_series(self.integer.iloc[:, 0], self.integer.iloc[:, 0])
@pytest.mark.slow
def test_integer_panel(self):
self.run_panel(_integer2_panel, np.random.randint(1, 100))
    def test_float_arithmetic_frame(self):
self.run_frame(self.frame2, self.frame2)
def test_float_arithmetic_series(self):
self.run_series(self.frame2.iloc[:, 0], self.frame2.iloc[:, 0])
@pytest.mark.slow
def test_float_panel(self):
self.run_panel(_frame2_panel, np.random.randn() + 0.1, binary_comp=0.8)
def test_mixed_arithmetic_frame(self):
# TODO: FIGURE OUT HOW TO GET IT TO WORK...
# can't do arithmetic because comparison methods try to do *entire*
# frame instead of by-column
self.run_frame(self.mixed2, self.mixed2, run_binary=False)
def test_mixed_arithmetic_series(self):
for col in self.mixed2.columns:
self.run_series(self.mixed2[col], self.mixed2[col], binary_comp=4)
@pytest.mark.slow
def test_mixed_panel(self):
self.run_panel(_mixed2_panel, np.random.randint(1, 100),
binary_comp=-2)
    def test_float_arithmetic(self):
self.run_arithmetic(self.frame, self.frame, assert_frame_equal)
self.run_arithmetic(self.frame.iloc[:, 0], self.frame.iloc[:, 0],
assert_series_equal, check_dtype=True)
def test_mixed_arithmetic(self):
self.run_arithmetic(self.mixed, self.mixed, assert_frame_equal)
for col in self.mixed.columns:
self.run_arithmetic(self.mixed[col], self.mixed[col],
assert_series_equal)
def test_integer_with_zeros(self):
self.integer *= np.random.randint(0, 2, size=np.shape(self.integer))
self.run_arithmetic(self.integer, self.integer,
assert_frame_equal)
self.run_arithmetic(self.integer.iloc[:, 0],
self.integer.iloc[:, 0], assert_series_equal)
def test_invalid(self):
# no op
result = expr._can_use_numexpr(operator.add, None, self.frame,
self.frame, 'evaluate')
assert not result
# mixed
result = expr._can_use_numexpr(operator.add, '+', self.mixed,
self.frame, 'evaluate')
assert not result
# min elements
result = expr._can_use_numexpr(operator.add, '+', self.frame2,
self.frame2, 'evaluate')
assert not result
# ok, we only check on first part of expression
result = expr._can_use_numexpr(operator.add, '+', self.frame,
self.frame2, 'evaluate')
assert result
def test_binary_ops(self):
def testit():
for f, f2 in [(self.frame, self.frame2),
(self.mixed, self.mixed2)]:
for op, op_str in [('add', '+'), ('sub', '-'), ('mul', '*'),
('div', '/'), ('pow', '**')]:
# numpy >= 1.11 doesn't handle integers
# raised to integer powers
# https://github.com/pandas-dev/pandas/issues/15363
if op == 'pow' and not _np_version_under1p11:
continue
if op == 'div':
op = getattr(operator, 'truediv', None)
else:
op = getattr(operator, op, None)
if op is not None:
result = expr._can_use_numexpr(op, op_str, f, f,
'evaluate')
assert result != f._is_mixed_type
result = expr.evaluate(op, op_str, f, f,
use_numexpr=True)
expected = expr.evaluate(op, op_str, f, f,
use_numexpr=False)
if isinstance(result, DataFrame):
tm.assert_frame_equal(result, expected)
else:
tm.assert_numpy_array_equal(result,
expected.values)
result = expr._can_use_numexpr(op, op_str, f2, f2,
'evaluate')
assert not result
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_boolean_ops(self):
def testit():
for f, f2 in [(self.frame, self.frame2),
(self.mixed, self.mixed2)]:
f11 = f
f12 = f + 1
f21 = f2
f22 = f2 + 1
for op, op_str in [('gt', '>'), ('lt', '<'), ('ge', '>='),
('le', '<='), ('eq', '=='), ('ne', '!=')]:
op = getattr(operator, op)
result = expr._can_use_numexpr(op, op_str, f11, f12,
'evaluate')
assert result != f11._is_mixed_type
result = expr.evaluate(op, op_str, f11, f12,
use_numexpr=True)
expected = expr.evaluate(op, op_str, f11, f12,
use_numexpr=False)
if isinstance(result, DataFrame):
tm.assert_frame_equal(result, expected)
else:
tm.assert_numpy_array_equal(result, expected.values)
result = expr._can_use_numexpr(op, op_str, f21, f22,
'evaluate')
assert not result
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_where(self):
def testit():
for f in [self.frame, self.frame2, self.mixed, self.mixed2]:
for cond in [True, False]:
c = np.empty(f.shape, dtype=np.bool_)
c.fill(cond)
result = expr.where(c, f.values, f.values + 1)
expected = np.where(c, f.values, f.values + 1)
tm.assert_numpy_array_equal(result, expected)
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_bool_ops_raise_on_arithmetic(self):
df = DataFrame({'a': np.random.rand(10) > 0.5,
'b': np.random.rand(10) > 0.5})
names = 'div', 'truediv', 'floordiv', 'pow'
ops = '/', '/', '//', '**'
msg = 'operator %r not implemented for bool dtypes'
for op, name in zip(ops, names):
if not compat.PY3 or name != 'div':
f = getattr(operator, name)
err_msg = re.escape(msg % op)
with tm.assert_raises_regex(NotImplementedError, err_msg):
f(df, df)
with tm.assert_raises_regex(NotImplementedError, err_msg):
f(df.a, df.b)
with tm.assert_raises_regex(NotImplementedError, err_msg):
f(df.a, True)
with tm.assert_raises_regex(NotImplementedError, err_msg):
f(False, df.a)
with tm.assert_raises_regex(TypeError, err_msg):
f(False, df)
with tm.assert_raises_regex(TypeError, err_msg):
f(df, True)
def test_bool_ops_warn_on_arithmetic(self):
n = 10
df = DataFrame({'a': np.random.rand(n) > 0.5,
'b': np.random.rand(n) > 0.5})
names = 'add', 'mul', 'sub'
ops = '+', '*', '-'
subs = {'+': '|', '*': '&', '-': '^'}
sub_funcs = {'|': 'or_', '&': 'and_', '^': 'xor'}
for op, name in zip(ops, names):
f = getattr(operator, name)
fe = getattr(operator, sub_funcs[subs[op]])
# >= 1.13.0 these are now TypeErrors
if op == '-' and not _np_version_under1p13:
continue
with tm.use_numexpr(True, min_elements=5):
with tm.assert_produces_warning(check_stacklevel=False):
r = f(df, df)
e = fe(df, df)
tm.assert_frame_equal(r, e)
with tm.assert_produces_warning(check_stacklevel=False):
r = f(df.a, df.b)
e = fe(df.a, df.b)
tm.assert_series_equal(r, e)
with tm.assert_produces_warning(check_stacklevel=False):
r = f(df.a, True)
e = fe(df.a, True)
tm.assert_series_equal(r, e)
with tm.assert_produces_warning(check_stacklevel=False):
r = f(False, df.a)
e = fe(False, df.a)
tm.assert_series_equal(r, e)
with tm.assert_produces_warning(check_stacklevel=False):
r = f(False, df)
e = fe(False, df)
tm.assert_frame_equal(r, e)
with tm.assert_produces_warning(check_stacklevel=False):
r = f(df, True)
e = fe(df, True)
tm.assert_frame_equal(r, e)
|
{
"content_hash": "c8aa9286edcbeb13fe7aba9ad17e75a1",
"timestamp": "",
"source": "github",
"line_count": 443,
"max_line_length": 79,
"avg_line_length": 39.73363431151242,
"alnum_prop": 0.4924440404499489,
"repo_name": "louispotok/pandas",
"id": "56e00fa8af23d3b77b0f90e710d0ed33170f971e",
"size": "17626",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pandas/tests/test_expressions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3847"
},
{
"name": "C",
"bytes": "432930"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "563"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "13452425"
},
{
"name": "Shell",
"bytes": "25056"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
}
|
from __future__ import division, unicode_literals
import base64
import io
import itertools
import time
from .fragment import FragmentFD
from ..compat import (
compat_etree_fromstring,
compat_urlparse,
compat_urllib_error,
compat_urllib_parse_urlparse,
compat_struct_pack,
compat_struct_unpack,
)
from ..utils import (
fix_xml_ampersands,
xpath_text,
)
class DataTruncatedError(Exception):
pass
class FlvReader(io.BytesIO):
"""
Reader for Flv files
The file format is documented in https://www.adobe.com/devnet/f4v.html
"""
def read_bytes(self, n):
data = self.read(n)
if len(data) < n:
raise DataTruncatedError(
                'FlvReader error: need %d bytes but only got %d bytes' % (
                    n, len(data)))
return data
# Utility functions for reading numbers and strings
def read_unsigned_long_long(self):
return compat_struct_unpack('!Q', self.read_bytes(8))[0]
def read_unsigned_int(self):
return compat_struct_unpack('!I', self.read_bytes(4))[0]
def read_unsigned_char(self):
return compat_struct_unpack('!B', self.read_bytes(1))[0]
def read_string(self):
res = b''
while True:
char = self.read_bytes(1)
if char == b'\x00':
break
res += char
return res
def read_box_info(self):
"""
Read a box and return the info as a tuple: (box_size, box_type, box_data)
"""
real_size = size = self.read_unsigned_int()
box_type = self.read_bytes(4)
header_end = 8
if size == 1:
real_size = self.read_unsigned_long_long()
header_end = 16
return real_size, box_type, self.read_bytes(real_size - header_end)
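    # Editorial illustration (synthetic bytes, not from a real stream): an
    # 8-byte header declaring size 12 and type b'abcd' followed by a 4-byte
    # payload parses back into (12, b'abcd', b'\x00\x01\x02\x03'):
    #
    #   FlvReader(b'\x00\x00\x00\x0cabcd\x00\x01\x02\x03').read_box_info()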
def read_asrt(self):
# version
self.read_unsigned_char()
# flags
self.read_bytes(3)
quality_entry_count = self.read_unsigned_char()
# QualityEntryCount
for i in range(quality_entry_count):
self.read_string()
segment_run_count = self.read_unsigned_int()
segments = []
for i in range(segment_run_count):
first_segment = self.read_unsigned_int()
fragments_per_segment = self.read_unsigned_int()
segments.append((first_segment, fragments_per_segment))
return {
'segment_run': segments,
}
def read_afrt(self):
# version
self.read_unsigned_char()
# flags
self.read_bytes(3)
# time scale
self.read_unsigned_int()
quality_entry_count = self.read_unsigned_char()
# QualitySegmentUrlModifiers
for i in range(quality_entry_count):
self.read_string()
fragments_count = self.read_unsigned_int()
fragments = []
for i in range(fragments_count):
first = self.read_unsigned_int()
first_ts = self.read_unsigned_long_long()
duration = self.read_unsigned_int()
if duration == 0:
discontinuity_indicator = self.read_unsigned_char()
else:
discontinuity_indicator = None
fragments.append({
'first': first,
'ts': first_ts,
'duration': duration,
'discontinuity_indicator': discontinuity_indicator,
})
return {
'fragments': fragments,
}
def read_abst(self):
# version
self.read_unsigned_char()
# flags
self.read_bytes(3)
self.read_unsigned_int() # BootstrapinfoVersion
# Profile,Live,Update,Reserved
flags = self.read_unsigned_char()
live = flags & 0x20 != 0
# time scale
self.read_unsigned_int()
# CurrentMediaTime
self.read_unsigned_long_long()
# SmpteTimeCodeOffset
self.read_unsigned_long_long()
self.read_string() # MovieIdentifier
server_count = self.read_unsigned_char()
# ServerEntryTable
for i in range(server_count):
self.read_string()
quality_count = self.read_unsigned_char()
# QualityEntryTable
for i in range(quality_count):
self.read_string()
# DrmData
self.read_string()
# MetaData
self.read_string()
segments_count = self.read_unsigned_char()
segments = []
for i in range(segments_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'asrt'
segment = FlvReader(box_data).read_asrt()
segments.append(segment)
fragments_run_count = self.read_unsigned_char()
fragments = []
for i in range(fragments_run_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'afrt'
fragments.append(FlvReader(box_data).read_afrt())
return {
'segments': segments,
'fragments': fragments,
'live': live,
}
def read_bootstrap_info(self):
total_size, box_type, box_data = self.read_box_info()
assert box_type == b'abst'
return FlvReader(box_data).read_abst()
def read_bootstrap_info(bootstrap_bytes):
return FlvReader(bootstrap_bytes).read_bootstrap_info()
def build_fragments_list(boot_info):
""" Return a list of (segment, fragment) for each fragment in the video """
res = []
segment_run_table = boot_info['segments'][0]
fragment_run_entry_table = boot_info['fragments'][0]['fragments']
first_frag_number = fragment_run_entry_table[0]['first']
fragments_counter = itertools.count(first_frag_number)
for segment, fragments_count in segment_run_table['segment_run']:
# In some live HDS streams (for example Rai), `fragments_count` is
# abnormal and causing out-of-memory errors. It's OK to change the
# number of fragments for live streams as they are updated periodically
if fragments_count == 4294967295 and boot_info['live']:
fragments_count = 2
for _ in range(fragments_count):
res.append((segment, next(fragments_counter)))
if boot_info['live']:
res = res[-2:]
return res
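# Editorial sketch (synthetic boot_info, not taken from a real manifest): it
# shows how a single segment run of three fragments starting at fragment 1
# expands to [(1, 1), (1, 2), (1, 3)] for a non-live stream.
def _example_build_fragments_list():
    boot_info = {
        'segments': [{'segment_run': [(1, 3)]}],
        'fragments': [{'fragments': [
            {'first': 1, 'ts': 0, 'duration': 4,
             'discontinuity_indicator': None}]}],
        'live': False,
    }
    return build_fragments_list(boot_info)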
def write_unsigned_int(stream, val):
stream.write(compat_struct_pack('!I', val))
def write_unsigned_int_24(stream, val):
stream.write(compat_struct_pack('!I', val)[1:])
def write_flv_header(stream):
"""Writes the FLV header to stream"""
    # FLV header: signature 'FLV' followed by version 1
    stream.write(b'FLV\x01')
    # TypeFlags: 0x05 means both audio and video tags are present
    stream.write(b'\x05')
    # DataOffset: size of this header (9 bytes)
    stream.write(b'\x00\x00\x00\x09')
    # PreviousTagSize0, always 0
    stream.write(b'\x00\x00\x00\x00')
def write_metadata_tag(stream, metadata):
"""Writes optional metadata tag to stream"""
SCRIPT_TAG = b'\x12'
FLV_TAG_HEADER_LEN = 11
if metadata:
        # Tag type (script data) and 3-byte payload size
        stream.write(SCRIPT_TAG)
        write_unsigned_int_24(stream, len(metadata))
        # Timestamp (3 bytes), TimestampExtended (1 byte) and StreamID (3 bytes), all zero
        stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
        stream.write(metadata)
        # Trailing PreviousTagSize: 11-byte tag header plus the payload
        write_unsigned_int(stream, FLV_TAG_HEADER_LEN + len(metadata))
def remove_encrypted_media(media):
return list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
'drmAdditionalHeaderSetId' not in e.attrib,
media))
def _add_ns(prop):
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
class F4mFD(FragmentFD):
"""
A downloader for f4m manifests or AdobeHDS.
"""
FD_NAME = 'f4m'
def _get_unencrypted_media(self, doc):
media = doc.findall(_add_ns('media'))
if not media:
self.report_error('No media found')
for e in (doc.findall(_add_ns('drmAdditionalHeader')) +
doc.findall(_add_ns('drmAdditionalHeaderSet'))):
# If id attribute is missing it's valid for all media nodes
# without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
if 'id' not in e.attrib:
self.report_error('Missing ID in f4m DRM')
media = remove_encrypted_media(media)
if not media:
self.report_error('Unsupported DRM')
return media
def _get_bootstrap_from_url(self, bootstrap_url):
bootstrap = self.ydl.urlopen(bootstrap_url).read()
return read_bootstrap_info(bootstrap)
def _update_live_fragments(self, bootstrap_url, latest_fragment):
fragments_list = []
retries = 30
while (not fragments_list) and (retries > 0):
boot_info = self._get_bootstrap_from_url(bootstrap_url)
fragments_list = build_fragments_list(boot_info)
fragments_list = [f for f in fragments_list if f[1] > latest_fragment]
if not fragments_list:
# Retry after a while
time.sleep(5.0)
retries -= 1
if not fragments_list:
self.report_error('Failed to update fragments')
return fragments_list
def _parse_bootstrap_node(self, node, base_url):
# Sometimes non empty inline bootstrap info can be specified along
# with bootstrap url attribute (e.g. dummy inline bootstrap info
# contains whitespace characters in [1]). We will prefer bootstrap
# url over inline bootstrap info when present.
# 1. http://live-1-1.rutube.ru/stream/1024/HDS/SD/C2NKsS85HQNckgn5HdEmOQ/1454167650/S-s604419906/move/four/dirs/upper/1024-576p.f4m
bootstrap_url = node.get('url')
if bootstrap_url:
bootstrap_url = compat_urlparse.urljoin(
base_url, bootstrap_url)
boot_info = self._get_bootstrap_from_url(bootstrap_url)
else:
bootstrap_url = None
bootstrap = base64.b64decode(node.text.encode('ascii'))
boot_info = read_bootstrap_info(bootstrap)
return boot_info, bootstrap_url
def real_download(self, filename, info_dict):
man_url = info_dict['url']
requested_bitrate = info_dict.get('tbr')
self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
man_url = urlh.geturl()
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244
# and https://github.com/rg3/youtube-dl/issues/7823)
manifest = fix_xml_ampersands(urlh.read().decode('utf-8', 'ignore')).strip()
doc = compat_etree_fromstring(manifest)
formats = [(int(f.attrib.get('bitrate', -1)), f)
for f in self._get_unencrypted_media(doc)]
if requested_bitrate is None or len(formats) == 1:
# get the best format
formats = sorted(formats, key=lambda f: f[0])
rate, media = formats[-1]
else:
rate, media = list(filter(
lambda f: int(f[0]) == requested_bitrate, formats))[0]
base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
# From Adobe F4M 3.0 spec:
# The <baseURL> element SHALL be the base URL for all relative
# (HTTP-based) URLs in the manifest. If <baseURL> is not present, said
# URLs should be relative to the location of the containing document.
boot_info, bootstrap_url = self._parse_bootstrap_node(bootstrap_node, man_url)
live = boot_info['live']
metadata_node = media.find(_add_ns('metadata'))
if metadata_node is not None:
metadata = base64.b64decode(metadata_node.text.encode('ascii'))
else:
metadata = None
fragments_list = build_fragments_list(boot_info)
test = self.params.get('test', False)
if test:
# We only download the first fragment
fragments_list = fragments_list[:1]
total_frags = len(fragments_list)
# For some akamai manifests we'll need to add a query to the fragment url
akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
ctx = {
'filename': filename,
'total_frags': total_frags,
'live': live,
}
self._prepare_frag_download(ctx)
dest_stream = ctx['dest_stream']
if ctx['complete_frags_downloaded_bytes'] == 0:
write_flv_header(dest_stream)
if not live:
write_metadata_tag(dest_stream, metadata)
base_url_parsed = compat_urllib_parse_urlparse(base_url)
self._start_frag_download(ctx)
frag_index = 0
while fragments_list:
seg_i, frag_i = fragments_list.pop(0)
frag_index += 1
if frag_index <= ctx['fragment_index']:
continue
name = 'Seg%d-Frag%d' % (seg_i, frag_i)
query = []
if base_url_parsed.query:
query.append(base_url_parsed.query)
if akamai_pv:
query.append(akamai_pv.strip(';'))
if info_dict.get('extra_param_to_segment_url'):
query.append(info_dict['extra_param_to_segment_url'])
url_parsed = base_url_parsed._replace(path=base_url_parsed.path + name, query='&'.join(query))
try:
success, down_data = self._download_fragment(ctx, url_parsed.geturl(), info_dict)
if not success:
return False
reader = FlvReader(down_data)
while True:
try:
_, box_type, box_data = reader.read_box_info()
except DataTruncatedError:
if test:
# In tests, segments may be truncated, and thus
# FlvReader may not be able to parse the whole
# chunk. If so, write the segment as is
# See https://github.com/rg3/youtube-dl/issues/9214
dest_stream.write(down_data)
break
raise
if box_type == b'mdat':
self._append_fragment(ctx, box_data)
break
except (compat_urllib_error.HTTPError, ) as err:
if live and (err.code == 404 or err.code == 410):
# We didn't keep up with the live window. Continue
# with the next available fragment.
msg = 'Fragment %d unavailable' % frag_i
self.report_warning(msg)
fragments_list = []
else:
raise
if not fragments_list and not test and live and bootstrap_url:
fragments_list = self._update_live_fragments(bootstrap_url, frag_i)
total_frags += len(fragments_list)
if fragments_list and (fragments_list[0][1] > frag_i + 1):
msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
self.report_warning(msg)
self._finish_frag_download(ctx)
return True
|
{
"content_hash": "55f11aa10b5b84ee2ff8188585501411",
"timestamp": "",
"source": "github",
"line_count": 429,
"max_line_length": 139,
"avg_line_length": 35.696969696969695,
"alnum_prop": 0.5718949980410082,
"repo_name": "lodemo/CATANA",
"id": "c8fde9a89093393132262f7b7d5ec60d83de4b8d",
"size": "15314",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "src/face_recognition/youtube_dl/downloader/f4m.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "4068"
},
{
"name": "HTML",
"bytes": "755393"
},
{
"name": "JavaScript",
"bytes": "1451186"
},
{
"name": "Jupyter Notebook",
"bytes": "12442842"
},
{
"name": "MATLAB",
"bytes": "29584"
},
{
"name": "Python",
"bytes": "5006823"
},
{
"name": "Shell",
"bytes": "154"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_wireless_controller_timers
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_wireless_controller_timers.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_wireless_controller_timers_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_timers': {
'ble_scan_report_intv': '3',
'client_idle_timeout': '4',
'darrp_day': 'sunday',
'darrp_optimize': '6',
'discovery_interval': '7',
'echo_interval': '8',
'fake_ap_log': '9',
'ipsec_intf_cleanup': '10',
'radio_stats_interval': '11',
'rogue_ap_log': '12',
'sta_capability_interval': '13',
'sta_locate_timer': '14',
'sta_stats_interval': '15',
'vap_stats_interval': '16'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_timers.fortios_wireless_controller(input_data, fos_instance)
expected_data = {
'ble-scan-report-intv': '3',
'client-idle-timeout': '4',
'darrp-day': 'sunday',
'darrp-optimize': '6',
'discovery-interval': '7',
'echo-interval': '8',
'fake-ap-log': '9',
'ipsec-intf-cleanup': '10',
'radio-stats-interval': '11',
'rogue-ap-log': '12',
'sta-capability-interval': '13',
'sta-locate-timer': '14',
'sta-stats-interval': '15',
'vap-stats-interval': '16'
}
set_method_mock.assert_called_with('wireless-controller', 'timers', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_wireless_controller_timers_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_timers': {
'ble_scan_report_intv': '3',
'client_idle_timeout': '4',
'darrp_day': 'sunday',
'darrp_optimize': '6',
'discovery_interval': '7',
'echo_interval': '8',
'fake_ap_log': '9',
'ipsec_intf_cleanup': '10',
'radio_stats_interval': '11',
'rogue_ap_log': '12',
'sta_capability_interval': '13',
'sta_locate_timer': '14',
'sta_stats_interval': '15',
'vap_stats_interval': '16'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_timers.fortios_wireless_controller(input_data, fos_instance)
expected_data = {
'ble-scan-report-intv': '3',
'client-idle-timeout': '4',
'darrp-day': 'sunday',
'darrp-optimize': '6',
'discovery-interval': '7',
'echo-interval': '8',
'fake-ap-log': '9',
'ipsec-intf-cleanup': '10',
'radio-stats-interval': '11',
'rogue-ap-log': '12',
'sta-capability-interval': '13',
'sta-locate-timer': '14',
'sta-stats-interval': '15',
'vap-stats-interval': '16'
}
set_method_mock.assert_called_with('wireless-controller', 'timers', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_wireless_controller_timers_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_timers': {
'ble_scan_report_intv': '3',
'client_idle_timeout': '4',
'darrp_day': 'sunday',
'darrp_optimize': '6',
'discovery_interval': '7',
'echo_interval': '8',
'fake_ap_log': '9',
'ipsec_intf_cleanup': '10',
'radio_stats_interval': '11',
'rogue_ap_log': '12',
'sta_capability_interval': '13',
'sta_locate_timer': '14',
'sta_stats_interval': '15',
'vap_stats_interval': '16'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_timers.fortios_wireless_controller(input_data, fos_instance)
expected_data = {
'ble-scan-report-intv': '3',
'client-idle-timeout': '4',
'darrp-day': 'sunday',
'darrp-optimize': '6',
'discovery-interval': '7',
'echo-interval': '8',
'fake-ap-log': '9',
'ipsec-intf-cleanup': '10',
'radio-stats-interval': '11',
'rogue-ap-log': '12',
'sta-capability-interval': '13',
'sta-locate-timer': '14',
'sta-stats-interval': '15',
'vap-stats-interval': '16'
}
set_method_mock.assert_called_with('wireless-controller', 'timers', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_wireless_controller_timers_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_timers': {
'random_attribute_not_valid': 'tag',
'ble_scan_report_intv': '3',
'client_idle_timeout': '4',
'darrp_day': 'sunday',
'darrp_optimize': '6',
'discovery_interval': '7',
'echo_interval': '8',
'fake_ap_log': '9',
'ipsec_intf_cleanup': '10',
'radio_stats_interval': '11',
'rogue_ap_log': '12',
'sta_capability_interval': '13',
'sta_locate_timer': '14',
'sta_stats_interval': '15',
'vap_stats_interval': '16'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_timers.fortios_wireless_controller(input_data, fos_instance)
expected_data = {
'ble-scan-report-intv': '3',
'client-idle-timeout': '4',
'darrp-day': 'sunday',
'darrp-optimize': '6',
'discovery-interval': '7',
'echo-interval': '8',
'fake-ap-log': '9',
'ipsec-intf-cleanup': '10',
'radio-stats-interval': '11',
'rogue-ap-log': '12',
'sta-capability-interval': '13',
'sta-locate-timer': '14',
'sta-stats-interval': '15',
'vap-stats-interval': '16'
}
set_method_mock.assert_called_with('wireless-controller', 'timers', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
{
"content_hash": "78d585d55eedea429b369434afdb40d5",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 133,
"avg_line_length": 36.31799163179916,
"alnum_prop": 0.5830645161290322,
"repo_name": "thaim/ansible",
"id": "8f5a418119a4e521375f320968db579339a187b3",
"size": "9376",
"binary": false,
"copies": "20",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/fortios/test_fortios_wireless_controller_timers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
"""
A general tool for converting data from the
dictionary format to an (n x k) python list that's
ready for training an sklearn algorithm
    n--no. of key-value pairs in dictionary
k--no. of features being extracted
dictionary keys are names of persons in dataset
dictionary values are dictionaries, where each
key-value pair in the dict is the name
of a feature, and its value for that person
In addition to converting a dictionary to a numpy
array, you may want to separate the labels from the
features--this is what targetFeatureSplit is for
so, if you want to have the poi label as the target,
and the features you want to use are the person's
salary and bonus, here's what you would do:
feature_list = ["poi", "salary", "bonus"]
data_array = featureFormat( data_dictionary, feature_list )
label, features = targetFeatureSplit(data_array)
the line above (targetFeatureSplit) assumes that the
label is the _first_ item in feature_list--very important
that poi is listed first!
"""
import numpy as np
def featureFormat( dictionary, features, remove_NaN=True, remove_all_zeroes=True, remove_any_zeroes=False, sort_keys = False):
""" convert dictionary to numpy array of features
remove_NaN = True will convert "NaN" string to 0.0
remove_all_zeroes = True will omit any data points for which
all the features you seek are 0.0
remove_any_zeroes = True will omit any data points for which
any of the features you seek are 0.0
sort_keys = True sorts keys by alphabetical order. Setting the value as
a string opens the corresponding pickle file with a preset key
order (this is used for Python 3 compatibility, and sort_keys
should be left as False for the course mini-projects).
NOTE: first feature is assumed to be 'poi' and is not checked for
removal for zero or missing values.
"""
return_list = []
# Key order - first branch is for Python 3 compatibility on mini-projects,
# second branch is for compatibility on final project.
if isinstance(sort_keys, str):
import pickle
keys = pickle.load(open(sort_keys, "rb"))
elif sort_keys:
keys = sorted(dictionary.keys())
else:
keys = dictionary.keys()
for key in keys:
tmp_list = []
for feature in features:
try:
dictionary[key][feature]
except KeyError:
                print("error: key %s not present" % feature)
return
value = dictionary[key][feature]
if value=="NaN" and remove_NaN:
value = 0
tmp_list.append( float(value) )
# Logic for deciding whether or not to add the data point.
append = True
# exclude 'poi' class as criteria.
if features[0] == 'poi':
test_list = tmp_list[1:]
else:
test_list = tmp_list
### if all features are zero and you want to remove
### data points that are all zero, do that here
if remove_all_zeroes:
append = False
for item in test_list:
if item != 0 and item != "NaN":
append = True
break
### if any features for a given data point are zero
### and you want to remove data points with any zeroes,
### handle that here
if remove_any_zeroes:
if 0 in test_list or "NaN" in test_list:
append = False
### Append the data point if flagged for addition.
if append:
return_list.append( np.array(tmp_list) )
return np.array(return_list)
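# Editorial illustration (made-up records, not from the course datasets): with
# the default flags, "NaN" values are converted to 0.0 and any person whose
# non-'poi' features are all zero is dropped, so only PERSON A survives here.
def _example_featureFormat():
    data_dict = {"PERSON A": {"poi": 1.0, "salary": 1000.0},
                 "PERSON B": {"poi": 0.0, "salary": "NaN"}}
    return featureFormat(data_dict, ["poi", "salary"])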
def targetFeatureSplit( data ):
"""
given a numpy array like the one returned from
featureFormat, separate out the first feature
and put it into its own list (this should be the
quantity you want to predict)
return targets and features as separate lists
(sklearn can generally handle both lists and numpy arrays as
input formats when training/predicting)
"""
target = []
features = []
for item in data:
target.append( item[0] )
features.append( item[1:] )
return target, features
|
{
"content_hash": "242297fbb4720e0ade944edaa1b93d11",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 126,
"avg_line_length": 35.795081967213115,
"alnum_prop": 0.6180444240897641,
"repo_name": "wllmtrng/udacity_data_analyst_nanodegree",
"id": "ab7739b969ffa78ee7dab2cfc81e2558e09ae3cd",
"size": "4386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "P5 sklearn ML/tools/feature_format.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2387"
},
{
"name": "HTML",
"bytes": "16065728"
},
{
"name": "JavaScript",
"bytes": "867"
},
{
"name": "Jupyter Notebook",
"bytes": "3419330"
},
{
"name": "Python",
"bytes": "13221"
},
{
"name": "R",
"bytes": "11368"
}
],
"symlink_target": ""
}
|
"""Take alignments of PacBio data highest quality, any quality (low quality),
or subreads and output the best aligned molecules
The purpose of this is to produce an output with only one read per molecule,
and that read representing the best alignment for that molecule. The 'best'
alignment of a read is chosen based on the following prioritized criteria. Once a molecule is selected from one level, it will not be selected from another level.
Inputs are alignment in BAM format.
1. Aligned - The HQ (high quality) uncorrected aligned reads are considered the best. They are already single molecule. These are considered better than the HQ corrected alignments, because the HQ threshold should be set high enough that the correction process introduces more errors than it corrects, which would be expected with SNPs being artificially changed in correction at some point.
2. Aligned - The HQ corrected alignment is the next best. If its available it certainly should be very high quality data, just not quite as good as the HQ uncorrected.
3. Aligned - The AQ (any quality) corrected is the next best. These often include the HQ data, but since those have already been added if they are available, it's not an issue. Any additional corrected alignments will be added.
4. Aligned - The AQ uncorrected reads are next. These will be very diverse in their error rates.
5. Aligned - The corrected subreads are next. The best aligned subread from each molecule is selected, (chosen by most bases aligned in a single path).
6. Aligned - The uncorrected subreads are the last group used. Again they are chosen based on the best aligned subread from all the subreads of a molecule.
7. Unaligned - HQ - Any reads not aligned from the above sets that remain are added
8. Unaligned - HQ corrected - adding any more possible unaligned reads
9. Unaligned - AQ corrected - Any reads not aligned are added
10. Unaligned - AQ - Any reads not aligned are added
11. Unaligned - corrected subreads are added. since all will have an aligned base count of zero, the selection of which subread to add is based on subread length rather than aligned bases
12. Unaligned - uncorrected subreads are added. since all will have an aligned base count of zero, the selection of which subread to add is based on subread length rather than aligned bases
So basically we prioritize: first CCS reads good enough that they shouldn't be corrected, then corrected CCS reads, then CCS reads, then best aligned subreads; and if things were not aligned we take the CCS read first, and the longest subread of the unaligned molecules last.
Unless a '.bam' file is specified the output is SAM format with the header derived from any one of the entered alignments.
"""
import argparse, sys, os, gzip, re
from seqtools.format.pacbio import PacBioReadName
from seqtools.format.sam import SAM
from subprocess import Popen, PIPE
_nameprog = re.compile('^(\S+)')
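# Editorial sketch (not used by the tool itself): the selection strategy the
# docstring describes boils down to a "negative filter" of molecules that have
# already been written; each priority level only emits reads whose molecule is
# not yet in that set, then records the molecules it emitted.
def _sketch_priority_selection(levels):
  """levels is an iterable of lists of (molecule_id, read) pairs, ordered from
  the highest priority level to the lowest; returns the selected reads."""
  emitted = set()
  selected = []
  for level in levels:
    for molecule_id, read in level:
      if molecule_id in emitted:
        continue
      emitted.add(molecule_id)
      selected.append(read)
  return selected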
def main(args):
of = sys.stdout
if args.output and args.output[-4:] == '.bam':
cmd = 'samtools view -Sb - -o '+args.output
pof = Popen(cmd.split(),stdin=PIPE)
of = pof.stdin
elif args.output:
of = open(args.output,'w')
"""Use the valid input file to get the header information."""
header = None
if args.HQ:
cmd = 'samtools view -H '+args.HQ
sys.stderr.write(cmd+"\n")
header = Popen(cmd.split(),stdout=PIPE).communicate()[0]
of.write(header)
if (not header) and args.HQCorrected:
cmd = 'samtools view -H '+args.HQCorrected
sys.stderr.write(cmd+"\n")
header = Popen(cmd.split(),stdout=PIPE).communicate()[0]
of.write(header)
if (not header) and args.AQ:
cmd = 'samtools view -H '+args.AQ
sys.stderr.write(cmd+"\n")
header = Popen(cmd.split(),stdout=PIPE).communicate()[0]
of.write(header)
if (not header) and args.AQCorrected:
cmd = 'samtools view -H '+args.AQCorrected
sys.stderr.write(cmd+"\n")
header = Popen(cmd.split(),stdout=PIPE).communicate()[0]
of.write(header)
if (not header) and args.subreads:
cmd = 'samtools view -H '+args.subreads
sys.stderr.write(cmd+"\n")
header = Popen(cmd.split(),stdout=PIPE).communicate()[0]
of.write(header)
if (not header) and args.subreadsCorrected:
cmd = 'samtools view -H '+args.subreadsCorrected
sys.stderr.write(cmd+"\n")
header = Popen(cmd.split(),stdout=PIPE).communicate()[0]
of.write(header)
_nameprog = re.compile('^(\S+)')
  negative_filter = set() # remove these
  """ Next read through the alignments THAT ALIGNED in order of priority"""
negative_filter = get_best_set(negative_filter,'-F 4',of,args,True)
"""After traversing all the aligned reads we can do it all over again
this time with the unaligned portion of reads"""
""" Finally go through the reads that did NOT ALIGN to get anything left"""
get_best_set(negative_filter,'-f 4',of,args,False)
if args.output and args.output[-4:] == '.bam':
pof.communicate()
else:
of.close()
def get_best_set(negative_filter,flag,of,args,aligned):
if args.HQ:
"""If we have the highest quality ccs reads use those first"""
cmd = 'samtools view '+flag+' '+args.HQ
sys.stderr.write(cmd+"\n")
p = Popen(cmd.split(),stdout=PIPE)
negative_filter = _traverse_unobserved(p.stdout,negative_filter,of)
p.communicate()
sys.stderr.write("molecules written: "+str(len(negative_filter))+"\n")
if args.HQCorrected:
"""If we have the highest quality corrected ccs reads use those next"""
cmd = 'samtools view '+flag+' '+args.HQCorrected
sys.stderr.write(cmd+"\n")
p = Popen(cmd.split(),stdout=PIPE)
negative_filter = _traverse_unobserved(p.stdout,negative_filter,of)
p.communicate()
sys.stderr.write("molecules written: "+str(len(negative_filter))+"\n")
if args.AQCorrected:
"""If we have the any quality corrected ccs reads use those next"""
cmd = 'samtools view '+flag+' '+args.AQCorrected
sys.stderr.write(cmd+"\n")
p = Popen(cmd.split(),stdout=PIPE)
negative_filter = _traverse_unobserved(p.stdout,negative_filter,of)
p.communicate()
sys.stderr.write("molecules written: "+str(len(negative_filter))+"\n")
if args.AQ:
"""If we have the lower quality ccs reads use those next"""
cmd = 'samtools view '+flag+' '+args.AQ
sys.stderr.write(cmd+"\n")
p = Popen(cmd.split(),stdout=PIPE)
negative_filter = _traverse_unobserved(p.stdout,negative_filter,of)
p.communicate()
sys.stderr.write("molecules written: "+str(len(negative_filter))+"\n")
if args.subreadsCorrected:
"""If we have corrected subreads reads use those but we need to pick the best alignment for each molecule.
The first pass through is just to get information on which is the best alignment"""
negative_filter = _do_subread_set(flag,args.subreadsCorrected,of,negative_filter,aligned)
if args.subreads:
"""If we have subreads reads use those but we need to pick the best alignment for each molecule.
The first pass through is just to get information on which is the best alignment"""
negative_filter = _do_subread_set(flag,args.subreads,of,negative_filter,aligned)
return negative_filter
def _do_subread_set(flag,input_file,of,negative_filter,aligned):
best = {}
cmd = 'samtools view '+flag+' '+input_file
sys.stderr.write(cmd+"\n")
p = Popen(cmd.split(),stdout=PIPE)
z = 0
for line in p.stdout:
z += 1
if z%10000==0: sys.stderr.write(str(z) + " subread alignment paths scanned for alignment length\r")
pbname = PacBioReadName(_nameprog.match(line).group(1))
mol = pbname.get_molecule()
if mol in negative_filter: continue
name = pbname.name()
sam = SAM(line)
c = 0 # aligned base count if we are aligned, subread length if we are not aligned
if aligned:
c = sam.get_aligned_bases_count()
else:
c = sam.get_query_length()
if mol not in best:
best[mol] = [name,c]
if c > best[mol][1]: best[mol] = [name,c]
p.communicate()
sys.stderr.write("\n")
sys.stderr.write("Finished analyzing subread lengths\nWriting aligned subreads\n")
"""After getting all the best alignment counts we can traverse again
to keep the best"""
cmd = 'samtools view '+flag+' '+input_file
sys.stderr.write(cmd+"\n")
z = 0
p = Popen(cmd.split(),stdout=PIPE)
for line in p.stdout:
z += 1
    if z%10000==0: sys.stderr.write(str(z) + " subread alignment paths scanned while selecting the best\r")
pbname = PacBioReadName(_nameprog.match(line).group(1))
mol = pbname.get_molecule()
name = pbname.name()
if mol in negative_filter: continue
if not best[mol][0] == name: continue
of.write(line)
p.communicate()
for mol in best: negative_filter.add(mol)
sys.stderr.write("\n")
sys.stderr.write("molecules written: "+str(len(negative_filter))+"\n")
return negative_filter
def _traverse_unobserved(stream,negative_filter,of):
"""Go through a stream and print out anything not in observed set"""
observed = set()
for line in stream:
name = PacBioReadName(_nameprog.match(line).group(1))
if name.get_molecule() not in negative_filter: of.write(line)
observed.add(name.get_molecule())
return negative_filter|observed
def do_inputs():
# Setup command line inputs
parser=argparse.ArgumentParser(description="Get a set of best molecule alignments. Prioritize on HQ. This script does not account for corrected reads. That will be another script. Requires samtools.",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--HQ',help="highest quality CCS reads. This is the highest priority to use")
parser.add_argument('--HQCorrected',help="highest quality corrected CCS reads. This is lower priority to use than the uncorrected high quality.")
parser.add_argument('--AQ',help="any quality CCS reads of a broad range of qualities (usually would includes HQ). This is lower priority to use than the corrected any quality.")
parser.add_argument('--AQCorrected',help="Any quality CCS reads corrected. This is higher priority to use than the uncorrected any quality.")
parser.add_argument('--subreads',help="the subreads")
parser.add_argument('--subreadsCorrected',help="Any quality subread corrected.")
parser.add_argument('--output','-o',help="Specifiy path to write index")
args = parser.parse_args()
if not (args.HQ or args.HQCorrected or args.AQ or args.AQCorrected \
or args.subreads or args.subreadsCorrected):
    parser.error('must specify one or more alignment files: --HQ --HQCorrected --AQ --AQCorrected --subreads --subreadsCorrected')
return args
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd.split()
args = do_inputs()
main(args)
sys.argv = cache_argv
if __name__=="__main__":
args = do_inputs()
main(args)
|
{
"content_hash": "62e8919c6ee7573ae65f9fb47283078e",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 391,
"avg_line_length": 50.38425925925926,
"alnum_prop": 0.7081687034824956,
"repo_name": "jason-weirather/py-seq-tools",
"id": "9b3ab85eaade2cf5a29277c7fe624799518cb3ba",
"size": "10883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seqtools/cli/utilities/pacbio_best_molecule_alignments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "973966"
}
],
"symlink_target": ""
}
|
from runner.koan import *
import functools
class AboutDecoratingWithClasses(Koan):
def maximum(self, a, b):
if a>b:
return a
else:
return b
def test_partial_that_wrappers_no_args(self):
"""
Before we can understand this type of decorator we need to consider
the partial.
"""
max = functools.partial(self.maximum)
self.assertEqual(23, max(7,23))
self.assertEqual(10, max(10,-10))
def test_partial_that_wrappers_first_arg(self):
max0 = functools.partial(self.maximum, 0)
self.assertEqual(0, max0(-4))
self.assertEqual(5, max0(5))
def test_partial_that_wrappers_all_args(self):
always99 = functools.partial(self.maximum, 99, 20)
always20 = functools.partial(self.maximum, 9, 20)
self.assertEqual(99, always99())
self.assertEqual(20, always20())
# ------------------------------------------------------------------
class doubleit:
def __init__(self, fn):
self.fn = fn
def __call__(self, *args):
return self.fn(*args) + ', ' + self.fn(*args)
def __get__(self, obj, cls=None):
if not obj:
# Decorating an unbound function
return self
else:
# Decorating a bound method
return functools.partial(self, obj)
@doubleit
def foo(self):
return "foo"
@doubleit
def parrot(self, text):
return text.upper()
def test_decorator_with_no_arguments(self):
# To clarify: the decorator above the function has no arguments, even
# if the decorated function does
self.assertEqual('foo, foo', self.foo())
self.assertEqual('PIECES OF EIGHT, PIECES OF EIGHT', self.parrot('pieces of eight'))
# ------------------------------------------------------------------
def sound_check(self):
#Note: no decorator
return "Testing..."
def test_what_a_decorator_is_doing_to_a_function(self):
#wrap the function with the decorator
self.sound_check = self.doubleit(self.sound_check)
self.assertEqual('Testing..., Testing...', self.sound_check())
# ------------------------------------------------------------------
class documenter:
def __init__(self, *args):
self.fn_doc = args[0]
def __call__(self, fn):
def decorated_function(*args):
return fn(*args)
if fn.__doc__:
decorated_function.__doc__ = fn.__doc__ + ": " + self.fn_doc
else:
decorated_function.__doc__ = self.fn_doc
return decorated_function
@documenter("Increments a value by one. Kind of.")
def count_badly(self, num):
num += 1
if num==3:
return 5
else:
return num
@documenter("Does nothing")
def idler(self, num):
"Idler"
pass
def test_decorator_with_an_argument(self):
self.assertEqual(5, self.count_badly(2))
self.assertEqual("Increments a value by one. Kind of.", self.count_badly.__doc__)
def test_documentor_which_already_has_a_docstring(self):
self.assertEqual("Idler: Does nothing", self.idler.__doc__)
# ------------------------------------------------------------------
@documenter("DOH!")
@doubleit
@doubleit
def homer(self):
return "D'oh"
def test_we_can_chain_decorators(self):
self.assertEqual("D'oh, D'oh, D'oh, D'oh", self.homer())
self.assertEqual("DOH!", self.homer.__doc__)
|
{
"content_hash": "91d59280180de03550979fe56ff8cd15",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 92,
"avg_line_length": 29.344,
"alnum_prop": 0.5226281352235551,
"repo_name": "ducngtuan/my-python3-koans-solution",
"id": "3e764ba8ee8555d04ad29ef76f12a32f62566f68",
"size": "3715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python3/koans/about_decorating_with_classes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "4524"
},
{
"name": "Python",
"bytes": "323126"
},
{
"name": "Ruby",
"bytes": "48"
},
{
"name": "Shell",
"bytes": "1637"
}
],
"symlink_target": ""
}
|
def extractTranslatingboredomCom(item):
'''
Parser for 'translatingboredom.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
{
"content_hash": "88e0ee357c6e279c19ab7f995a76a139",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 26.476190476190474,
"alnum_prop": 0.6348920863309353,
"repo_name": "fake-name/ReadableWebProxy",
"id": "d5e677dabadcb469a8ae62baf73903791d8a42c2",
"size": "557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractTranslatingboredomCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
from hwt.code import If
from hwt.code_utils import rename_signal
from hwt.hdl.types.bits import Bits
from hwt.interfaces.std import VldSynced
from hwt.interfaces.utils import addClkRstn, propagateClkRstn
from hwt.serializer.mode import serializeParamsUniq
from hwt.synthesizer.param import Param
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
from hwt.synthesizer.unit import Unit
from hwtLib.amba.axi4 import Axi4, Axi4_r
from hwtLib.amba.axi_comp.lsu.write_aggregator_write_dispatcher import AxiWriteAggregatorWriteDispatcher
from hwtLib.amba.axis_comp.fifoCopy import AxiSFifoCopy, AxiSRegCopy
from hwtLib.amba.axis_comp.reg import AxiSReg
from hwtLib.handshaked.reg import HandshakedReg
from hwtLib.handshaked.streamNode import StreamNode
from hwtLib.logic.binToOneHot import binToOneHot
from hwtLib.logic.oneHotToBin import oneHotToBin
from hwtLib.mem.cam import CamMultiPort
from pyMathBitPrecise.bit_utils import apply_set_and_clear
@serializeParamsUniq
class AxiReadAggregator(Unit):
"""
    This is a component which reduces reads from the same address.
    This component has several slots for read transactions. Each slot has its own address record in a CAM which is used
    to detect reads from the same address. If a read targets an address which is currently being loaded, the read thread
    is put to sleep until the data for the previous read is received. After the data is received it is copied as a response also
    for this transaction.
.. figure:: ./_static/AxiReadAggregator.png
.. hwt-autodoc:: _example_AxiReadAggregator
"""
def _config(self):
Axi4._config(self)
self.CACHE_LINE_SIZE = Param(64) # [B]
def _declr(self):
AxiWriteAggregatorWriteDispatcher.precompute_constants(self)
addClkRstn(self)
with self._paramsShared():
self.s = Axi4()
self.m = Axi4()._m()
if self.BUS_WORDS_IN_CACHE_LINE > 1:
fb = AxiSFifoCopy(Axi4_r)
fb.DEPTH = 2 * self.BUS_WORDS_IN_CACHE_LINE
else:
fb = AxiSRegCopy(Axi4_r)
self.frame_buff = fb
ac = self.addr_cam = CamMultiPort()
ac.MATCH_PORT_CNT = 1
ac.ITEMS = 2 ** self.ID_WIDTH
ac.USE_VLD_BIT = False
ac.KEY_WIDTH = self.CACHE_LINE_ADDR_WIDTH
for i in [self.s, self.m]:
i.HAS_W = False
def read_data_section(self, read_ack: RtlSignal,
waiting_transaction_id: RtlSignal,
waiting_transaction_vld: RtlSignal,
data_copy_override: VldSynced):
s = self.s
m = self.m
fb = self.frame_buff
data_out_node = StreamNode([fb.dataOut], [s.r])
data_out_node.sync()
read_ack(data_out_node.ack())
fb.dataOut_copy_frame(
(fb.dataOut.valid & fb.dataOut.last & waiting_transaction_vld[fb.dataOut.id]) |
data_copy_override.vld
)
If(data_copy_override.vld,
fb.dataOut_replacement_id(data_copy_override.data)
).Else(
fb.dataOut_replacement_id(waiting_transaction_id[fb.dataOut.id])
)
s.r(fb.dataOut, exclude={s.r.valid, s.r.ready})
StreamNode(
[m.r],
[fb.dataIn],
).sync()
fb.dataIn(m.r, exclude={m.r.valid, m.r.ready})
def add_addr_cam_out_reg(self, item_vld:RtlSignal):
addr_cam = self.addr_cam
addr_cam_out = addr_cam.out[0] #HsBuilder(self, addr_cam.out).buff(1).end
addr_cam_out_reg = HandshakedReg(addr_cam_out.__class__)
addr_cam_out_reg._updateParamsFrom(addr_cam_out)
self.addr_cam_out_reg = addr_cam_out_reg
addr_cam_out_reg.dataIn(addr_cam_out, exclude=[addr_cam_out.data])
addr_cam_out_reg.dataIn.data(addr_cam_out.data & item_vld)
addr_cam_out = addr_cam_out_reg.dataOut
return addr_cam_out
def read_request_section(self, read_ack: RtlSignal,
item_vld: RtlSignal,
waiting_transaction_id: RtlSignal,
waiting_transaction_vld: RtlSignal,
data_copy_override: VldSynced):
s = self.s
m = self.m
addr_cam = self.addr_cam
ITEMS = addr_cam.ITEMS
addr_cam_out = self.add_addr_cam_out_reg(item_vld)
with self._paramsShared():
s_ar_tmp = self.s_ar_tmp = AxiSReg(s.AR_CLS)
last_cam_insert_match = self._reg("last_cam_insert_match", Bits(ITEMS), def_val=0)
match_res = rename_signal(
self,
item_vld &
(addr_cam_out.data | last_cam_insert_match) &
~waiting_transaction_vld,
"match_res")
blocking_access = rename_signal(
self,
s.ar.valid &
(
item_vld[s.ar.id] |
(s_ar_tmp.dataOut.valid & (s.ar.id._eq(s_ar_tmp.dataOut.id)))
),
"blocking_access")
s_ar_node = StreamNode(
[s.ar],
[addr_cam.match[0], s_ar_tmp.dataIn],
)
s_ar_node.sync(~blocking_access)
# s_ar_node_ack = s_ar_node.ack() & ~blocking_access
s_ar_tmp.dataIn(s.ar, exclude={s.ar.valid, s.ar.ready})
parent_transaction_id = oneHotToBin(self, match_res, "parent_transaction_id")
m_ar_node = StreamNode(
[s_ar_tmp.dataOut, addr_cam_out],
[m.ar],
extraConds={m.ar: match_res._eq(0)},
skipWhen={m.ar: match_res != 0},
)
m_ar_node.sync()
m.ar(s_ar_tmp.dataOut, exclude={m.ar.valid, m.ar.ready})
addr_cam.match[0].data(s.ar.addr[:self.CACHE_LINE_OFFSET_BITS])
ar_ack = rename_signal(self, m_ar_node.ack(), "ar_ack")
# insert into cam on empty position specified by id of this transaction
acw = addr_cam.write
acw.addr(s_ar_tmp.dataOut.id)
acw.data(s_ar_tmp.dataOut.addr[:self.CACHE_LINE_OFFSET_BITS])
acw.vld(addr_cam_out.vld)
#If(s_ar_node_ack,
last_cam_insert_match(binToOneHot(
s_ar_tmp.dataOut.id,
en=~blocking_access &
s.ar.valid &
s_ar_tmp.dataOut.valid &
s_ar_tmp.dataOut.addr[:self.CACHE_LINE_OFFSET_BITS]._eq(s.ar.addr[:self.CACHE_LINE_OFFSET_BITS])
))
#)
for trans_id in range(ITEMS):
# it becomes ready if we are requested for it on "s" interface
this_trans_start = s_ar_tmp.dataOut.id._eq(trans_id) & \
(data_copy_override.vld | ar_ack)
# item becomes invalid if we read last data word
this_trans_end = read_ack & s.r.id._eq(trans_id) & s.r.last
this_trans_end = rename_signal(self, this_trans_end, f"this_trans_end{trans_id:d}")
item_vld[trans_id](apply_set_and_clear(item_vld[trans_id], this_trans_start, this_trans_end))
waiting_transaction_start = (
ar_ack &
(match_res != 0) &
parent_transaction_id._eq(trans_id) &
~this_trans_end
)
            # note: this_trans_end in this context is for the parent transaction,
# which was not started just now, so it may be ending just now
waiting_transaction_start = rename_signal(self, waiting_transaction_start, f"waiting_transaction_start{trans_id:d}")
_waiting_transaction_vld = apply_set_and_clear(
waiting_transaction_vld[trans_id],
waiting_transaction_start,
this_trans_end)
waiting_transaction_vld[trans_id](rename_signal(self, _waiting_transaction_vld, f"waiting_transaction_vld{trans_id:d}"))
If(self.clk._onRisingEdge(),
If((match_res != 0) & ar_ack,
waiting_transaction_id[parent_transaction_id](s_ar_tmp.dataOut.id)
)
)
# parent transaction is finishing just now
# we need to quickly grab the data in data buffer and copy it also
# for this transaction
data_copy_override.vld(
s_ar_tmp.dataOut.valid &
read_ack &
(match_res != 0) &
s.r.id._eq(parent_transaction_id) &
s.r.last)
data_copy_override.data(s_ar_tmp.dataOut.id)
def _impl(self):
ITEMS = self.addr_cam.ITEMS
item_vld = self._reg("item_vld", Bits(ITEMS), def_val=0)
waiting_transaction_id = self._sig("waiting_transaction_id", self.s.ar.id._dtype[ITEMS])
waiting_transaction_vld = self._reg("waiting_transaction_vld", Bits(ITEMS), def_val=0)
read_ack = self._sig("read_ack")
# if the parent transaction is about to finish how we need to copy the response now
data_copy_override = VldSynced()
data_copy_override.DATA_WIDTH = self.ID_WIDTH
self.data_copy_override = data_copy_override
self.read_request_section(
read_ack, item_vld, waiting_transaction_id, waiting_transaction_vld,
data_copy_override)
self.read_data_section(
read_ack, waiting_transaction_id, waiting_transaction_vld, data_copy_override)
propagateClkRstn(self)
def _example_AxiReadAggregator():
u = AxiReadAggregator()
u.ID_WIDTH = 2
return u
if __name__ == "__main__":
from hwt.synthesizer.utils import to_rtl_str
u = _example_AxiReadAggregator()
u.DATA_WIDTH = 128
u.CACHE_LINE_SIZE = 16
u.ID_WIDTH = 6
print(to_rtl_str(u))
|
{
"content_hash": "2b02ae75bc5b1a47bf85b60f24d181d6",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 132,
"avg_line_length": 39.11020408163265,
"alnum_prop": 0.5978918806094761,
"repo_name": "Nic30/hwtLib",
"id": "48843948af767240f1663954cff9ca02de010a0c",
"size": "9630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hwtLib/amba/axi_comp/lsu/read_aggregator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "41560"
},
{
"name": "Python",
"bytes": "2523349"
},
{
"name": "VHDL",
"bytes": "117346"
},
{
"name": "Verilog",
"bytes": "36444"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
__all__ = ["json_encoder", "utils"]
import logging
from .utils import TqdmLoggingHandler
logger = logging.getLogger("root_optimize")
logger.addHandler(TqdmLoggingHandler())
|
{
"content_hash": "fde101a28da9d1ffd6d759e9181e1e92",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 43,
"avg_line_length": 25.3,
"alnum_prop": 0.758893280632411,
"repo_name": "kratsg/optimization",
"id": "bf0fc9cfcfbf3b5d2b8e888bf253b5cabf282295",
"size": "300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/root_optimize/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1235"
},
{
"name": "HCL",
"bytes": "823"
},
{
"name": "Makefile",
"bytes": "651"
},
{
"name": "Python",
"bytes": "125640"
},
{
"name": "Shell",
"bytes": "3157"
}
],
"symlink_target": ""
}
|
from django.forms import ModelForm, DateInput
from django import forms
from survey.models.households import HouseholdMember
class HouseholdMemberForm(ModelForm):
def __init__(self, *args, **kwargs):
super(HouseholdMemberForm, self).__init__(*args, **kwargs)
class Meta:
model = HouseholdMember
fields = ['surname', 'first_name', 'date_of_birth', 'male']
widgets = {
'surname': forms.TextInput(attrs={'placeholder': 'Family Name'}),
'first_name': forms.TextInput(attrs={'placeholder': 'Other Names'}),
'male': forms.RadioSelect(choices=((True, 'Male'), (False, 'Female'))),
}
date_of_birth = forms.DateField(label="Date of birth", widget=DateInput(attrs={'class': 'datepicker'}),
required=True, input_formats=["%Y-%m-%d"])
|
{
"content_hash": "8e6cabe1f78c1c637d6210b53903dea4",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 107,
"avg_line_length": 42.45,
"alnum_prop": 0.6136631330977621,
"repo_name": "antsmc2/mics",
"id": "af34172ae696fd87922a94101eba5342d025d0f5",
"size": "849",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "survey/forms/householdMember.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "37725"
},
{
"name": "JavaScript",
"bytes": "390607"
},
{
"name": "Python",
"bytes": "5206913"
},
{
"name": "Shell",
"bytes": "1277"
}
],
"symlink_target": ""
}
|
import random
import netaddr
from oslo.config import cfg
from sqlalchemy import event
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron import context as ctx
from neutron.db import api as db
from neutron.db import models_v2
from neutron.db import sqlalchemyutils
from neutron.extensions import l3
from neutron import manager
from neutron import neutron_plugin_base_v2
from neutron.notifiers import nova
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as service_constants
LOG = logging.getLogger(__name__)
# Ports with the following 'device_owner' values will not prevent
# network deletion. If delete_network() finds that all ports on a
# network have these owners, it will explicitly delete each port
# and allow network deletion to continue. Similarly, if delete_subnet()
# finds out that all existing IP Allocations are associated with ports
# with these owners, it will allow subnet deletion to proceed with the
# IP allocations being cleaned up by cascade.
AUTO_DELETE_PORT_OWNERS = [constants.DEVICE_OWNER_DHCP]
class CommonDbMixin(object):
"""Common methods used in core and service plugins."""
# Plugins, mixin classes implementing extension will register
# hooks into the dict below for "augmenting" the "core way" of
# building a query for retrieving objects from a model class.
# To this aim, the register_model_query_hook and unregister_query_hook
# from this class should be invoked
_model_query_hooks = {}
# This dictionary will store methods for extending attributes of
# api resources. Mixins can use this dict for adding their own methods
# TODO(salvatore-orlando): Avoid using class-level variables
_dict_extend_functions = {}
@classmethod
def register_model_query_hook(cls, model, name, query_hook, filter_hook,
result_filters=None):
"""Register a hook to be invoked when a query is executed.
Add the hooks to the _model_query_hooks dict. Models are the keys
of this dict, whereas the value is another dict mapping hook names to
callables performing the hook.
Each hook has a "query" component, used to build the query expression
and a "filter" component, which is used to build the filter expression.
Query hooks take as input the query being built and return a
transformed query expression.
Filter hooks take as input the filter expression being built and return
a transformed filter expression
"""
model_hooks = cls._model_query_hooks.get(model)
if not model_hooks:
# add key to dict
model_hooks = {}
cls._model_query_hooks[model] = model_hooks
model_hooks[name] = {'query': query_hook, 'filter': filter_hook,
'result_filters': result_filters}
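    # Editorial sketch (hypothetical model/attribute names, not part of
    # Neutron): a mixin implementing an extension could register a hook so
    # that every query on MyModel is also narrowed to rows owned by the
    # current tenant, e.g.:
    #
    #   CommonDbMixin.register_model_query_hook(
    #       MyModel, 'my_extension',
    #       query_hook=None,
    #       filter_hook=lambda ctx, model, flt:
    #           (model.owner == ctx.tenant_id) if flt is None
    #           else flt & (model.owner == ctx.tenant_id),
    #       result_filters=None)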
def _model_query(self, context, model):
query = context.session.query(model)
# define basic filter condition for model query
# NOTE(jkoelker) non-admin queries are scoped to their tenant_id
# NOTE(salvatore-orlando): unless the model allows for shared objects
query_filter = None
if not context.is_admin and hasattr(model, 'tenant_id'):
if hasattr(model, 'shared'):
query_filter = ((model.tenant_id == context.tenant_id) |
(model.shared == True))
else:
query_filter = (model.tenant_id == context.tenant_id)
# Execute query hooks registered from mixins and plugins
for _name, hooks in self._model_query_hooks.get(model,
{}).iteritems():
query_hook = hooks.get('query')
if isinstance(query_hook, basestring):
query_hook = getattr(self, query_hook, None)
if query_hook:
query = query_hook(context, model, query)
filter_hook = hooks.get('filter')
if isinstance(filter_hook, basestring):
filter_hook = getattr(self, filter_hook, None)
if filter_hook:
query_filter = filter_hook(context, model, query_filter)
# NOTE(salvatore-orlando): 'if query_filter' will try to evaluate the
# condition, raising an exception
if query_filter is not None:
query = query.filter(query_filter)
return query
def _fields(self, resource, fields):
if fields:
return dict(((key, item) for key, item in resource.items()
if key in fields))
return resource
def _get_tenant_id_for_create(self, context, resource):
if context.is_admin and 'tenant_id' in resource:
tenant_id = resource['tenant_id']
elif ('tenant_id' in resource and
resource['tenant_id'] != context.tenant_id):
reason = _('Cannot create resource for another tenant')
raise n_exc.AdminRequired(reason=reason)
else:
tenant_id = context.tenant_id
return tenant_id
def _get_by_id(self, context, model, id):
query = self._model_query(context, model)
return query.filter(model.id == id).one()
def _apply_filters_to_query(self, query, model, filters):
if filters:
for key, value in filters.iteritems():
column = getattr(model, key, None)
if column:
query = query.filter(column.in_(value))
for _name, hooks in self._model_query_hooks.get(model,
{}).iteritems():
result_filter = hooks.get('result_filters', None)
if isinstance(result_filter, basestring):
result_filter = getattr(self, result_filter, None)
if result_filter:
query = result_filter(query, filters)
return query
def _apply_dict_extend_functions(self, resource_type,
response, db_object):
for func in self._dict_extend_functions.get(
resource_type, []):
args = (response, db_object)
if isinstance(func, basestring):
func = getattr(self, func, None)
else:
# must call unbound method - use self as 1st argument
args = (self,) + args
if func:
func(*args)
def _get_collection_query(self, context, model, filters=None,
sorts=None, limit=None, marker_obj=None,
page_reverse=False):
collection = self._model_query(context, model)
collection = self._apply_filters_to_query(collection, model, filters)
if limit and page_reverse and sorts:
sorts = [(s[0], not s[1]) for s in sorts]
collection = sqlalchemyutils.paginate_query(collection, model, limit,
sorts,
marker_obj=marker_obj)
return collection
def _get_collection(self, context, model, dict_func, filters=None,
fields=None, sorts=None, limit=None, marker_obj=None,
page_reverse=False):
query = self._get_collection_query(context, model, filters=filters,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
items = [dict_func(c, fields) for c in query]
if limit and page_reverse:
items.reverse()
return items
def _get_collection_count(self, context, model, filters=None):
return self._get_collection_query(context, model, filters).count()
def _get_marker_obj(self, context, resource, limit, marker):
if limit and marker:
return getattr(self, '_get_%s' % resource)(context, marker)
return None
class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
CommonDbMixin):
"""V2 Neutron plugin interface implementation using SQLAlchemy models.
Whenever a non-read call happens the plugin will call an event handler
class method (e.g., network_created()). The result is that this class
can be sub-classed by other classes that add custom behaviors on certain
events.
"""
# This attribute specifies whether the plugin supports or not
# bulk/pagination/sorting operations. Name mangling is used in
# order to ensure it is qualified by class
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
def __init__(self):
db.configure_db()
if cfg.CONF.notify_nova_on_port_status_changes:
            # NOTE(arosen) These event listeners are here to hook into when
# port status changes and notify nova about their change.
self.nova_notifier = nova.Notifier()
event.listen(models_v2.Port, 'after_insert',
self.nova_notifier.send_port_status)
event.listen(models_v2.Port, 'after_update',
self.nova_notifier.send_port_status)
event.listen(models_v2.Port.status, 'set',
self.nova_notifier.record_port_status_changed)
@classmethod
def register_dict_extend_funcs(cls, resource, funcs):
cur_funcs = cls._dict_extend_functions.get(resource, [])
cur_funcs.extend(funcs)
cls._dict_extend_functions[resource] = cur_funcs
def _filter_non_model_columns(self, data, model):
"""Remove all the attributes from data which are not columns of
the model passed as second parameter.
"""
columns = [c.name for c in model.__table__.columns]
return dict((k, v) for (k, v) in
data.iteritems() if k in columns)
def _get_network(self, context, id):
try:
network = self._get_by_id(context, models_v2.Network, id)
except exc.NoResultFound:
raise n_exc.NetworkNotFound(net_id=id)
return network
def _get_subnet(self, context, id):
try:
subnet = self._get_by_id(context, models_v2.Subnet, id)
except exc.NoResultFound:
raise n_exc.SubnetNotFound(subnet_id=id)
return subnet
def _get_port(self, context, id):
try:
port = self._get_by_id(context, models_v2.Port, id)
except exc.NoResultFound:
raise n_exc.PortNotFound(port_id=id)
return port
def _get_dns_by_subnet(self, context, subnet_id):
dns_qry = context.session.query(models_v2.DNSNameServer)
return dns_qry.filter_by(subnet_id=subnet_id).all()
def _get_route_by_subnet(self, context, subnet_id):
route_qry = context.session.query(models_v2.SubnetRoute)
return route_qry.filter_by(subnet_id=subnet_id).all()
def _get_subnets_by_network(self, context, network_id):
subnet_qry = context.session.query(models_v2.Subnet)
return subnet_qry.filter_by(network_id=network_id).all()
def _get_all_subnets(self, context):
# NOTE(salvatore-orlando): This query might end up putting
# a lot of stress on the db. Consider adding a cache layer
return context.session.query(models_v2.Subnet).all()
@staticmethod
def _generate_mac(context, network_id):
base_mac = cfg.CONF.base_mac.split(':')
max_retries = cfg.CONF.mac_generation_retries
for i in range(max_retries):
mac = [int(base_mac[0], 16), int(base_mac[1], 16),
int(base_mac[2], 16), random.randint(0x00, 0xff),
random.randint(0x00, 0xff), random.randint(0x00, 0xff)]
if base_mac[3] != '00':
mac[3] = int(base_mac[3], 16)
mac_address = ':'.join(map(lambda x: "%02x" % x, mac))
if NeutronDbPluginV2._check_unique_mac(context, network_id,
mac_address):
LOG.debug(_("Generated mac for network %(network_id)s "
"is %(mac_address)s"),
{'network_id': network_id,
'mac_address': mac_address})
return mac_address
else:
LOG.debug(_("Generated mac %(mac_address)s exists. Remaining "
"attempts %(max_retries)s."),
{'mac_address': mac_address,
'max_retries': max_retries - (i + 1)})
LOG.error(_("Unable to generate mac address after %s attempts"),
max_retries)
raise n_exc.MacAddressGenerationFailure(net_id=network_id)
@staticmethod
def _check_unique_mac(context, network_id, mac_address):
mac_qry = context.session.query(models_v2.Port)
try:
mac_qry.filter_by(network_id=network_id,
mac_address=mac_address).one()
except exc.NoResultFound:
return True
return False
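    # --- Hedged example (not part of the original Neutron module) -----------
    # A standalone sketch of how _generate_mac() assembles an address: the
    # first three octets of cfg.CONF.base_mac (four, if the fourth octet is
    # not '00') are kept and the remaining octets are randomized. The base
    # value used here is invented for illustration.
    @staticmethod
    def _example_generate_mac_sketch(base_mac_str='fa:16:3e:00:00:00'):
        base_mac = base_mac_str.split(':')
        mac = [int(base_mac[0], 16), int(base_mac[1], 16),
               int(base_mac[2], 16), random.randint(0x00, 0xff),
               random.randint(0x00, 0xff), random.randint(0x00, 0xff)]
        if base_mac[3] != '00':
            mac[3] = int(base_mac[3], 16)
        # e.g. 'fa:16:3e:4d:09:c2' (the last three octets vary)
        return ':'.join("%02x" % octet for octet in mac)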
@staticmethod
def _delete_ip_allocation(context, network_id, subnet_id, ip_address):
# Delete the IP address from the IPAllocate table
LOG.debug(_("Delete allocated IP %(ip_address)s "
"(%(network_id)s/%(subnet_id)s)"),
{'ip_address': ip_address,
'network_id': network_id,
'subnet_id': subnet_id})
context.session.query(models_v2.IPAllocation).filter_by(
network_id=network_id,
ip_address=ip_address,
subnet_id=subnet_id).delete()
@staticmethod
def _generate_ip(context, subnets):
try:
return NeutronDbPluginV2._try_generate_ip(context, subnets)
except n_exc.IpAddressGenerationFailure:
NeutronDbPluginV2._rebuild_availability_ranges(context, subnets)
return NeutronDbPluginV2._try_generate_ip(context, subnets)
@staticmethod
def _try_generate_ip(context, subnets):
"""Generate an IP address.
The IP address will be generated from one of the subnets defined on
the network.
"""
range_qry = context.session.query(
models_v2.IPAvailabilityRange).join(
models_v2.IPAllocationPool).with_lockmode('update')
for subnet in subnets:
range = range_qry.filter_by(subnet_id=subnet['id']).first()
if not range:
LOG.debug(_("All IPs from subnet %(subnet_id)s (%(cidr)s) "
"allocated"),
{'subnet_id': subnet['id'], 'cidr': subnet['cidr']})
continue
ip_address = range['first_ip']
LOG.debug(_("Allocated IP - %(ip_address)s from %(first_ip)s "
"to %(last_ip)s"),
{'ip_address': ip_address,
'first_ip': range['first_ip'],
'last_ip': range['last_ip']})
if range['first_ip'] == range['last_ip']:
# No more free indices on subnet => delete
LOG.debug(_("No more free IP's in slice. Deleting allocation "
"pool."))
context.session.delete(range)
else:
# increment the first free
range['first_ip'] = str(netaddr.IPAddress(ip_address) + 1)
return {'ip_address': ip_address, 'subnet_id': subnet['id']}
raise n_exc.IpAddressGenerationFailure(net_id=subnets[0]['network_id'])
@staticmethod
def _rebuild_availability_ranges(context, subnets):
ip_qry = context.session.query(
models_v2.IPAllocation).with_lockmode('update')
# PostgreSQL does not support select...for update with an outer join.
# No join is needed here.
pool_qry = context.session.query(
models_v2.IPAllocationPool).options(
orm.noload('available_ranges')).with_lockmode('update')
for subnet in sorted(subnets):
LOG.debug(_("Rebuilding availability ranges for subnet %s")
% subnet)
# Create a set of all currently allocated addresses
ip_qry_results = ip_qry.filter_by(subnet_id=subnet['id'])
allocations = netaddr.IPSet([netaddr.IPAddress(i['ip_address'])
for i in ip_qry_results])
for pool in pool_qry.filter_by(subnet_id=subnet['id']):
# Create a set of all addresses in the pool
poolset = netaddr.IPSet(netaddr.iter_iprange(pool['first_ip'],
pool['last_ip']))
# Use set difference to find free addresses in the pool
available = poolset - allocations
# Generator compacts an ip set into contiguous ranges
def ipset_to_ranges(ipset):
first, last = None, None
for cidr in ipset.iter_cidrs():
if last and last + 1 != cidr.first:
yield netaddr.IPRange(first, last)
first = None
first, last = first if first else cidr.first, cidr.last
if first:
yield netaddr.IPRange(first, last)
# Write the ranges to the db
for range in ipset_to_ranges(available):
available_range = models_v2.IPAvailabilityRange(
allocation_pool_id=pool['id'],
first_ip=str(netaddr.IPAddress(range.first)),
last_ip=str(netaddr.IPAddress(range.last)))
context.session.add(available_range)
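    # --- Hedged example (not part of the original Neutron module) -----------
    # A standalone sketch of the rebuild above using plain netaddr objects
    # instead of DB rows: subtract the allocated addresses from the pool and
    # compact what is left into contiguous ranges. All addresses are invented.
    @staticmethod
    def _example_rebuild_ranges_sketch():
        pool = netaddr.IPSet(netaddr.iter_iprange('10.0.0.2', '10.0.0.10'))
        allocated = netaddr.IPSet(['10.0.0.4', '10.0.0.5'])
        available = pool - allocated
        ranges = []
        first, last = None, None
        for cidr in available.iter_cidrs():
            if last is not None and last + 1 != cidr.first:
                ranges.append(netaddr.IPRange(first, last))
                first = None
            first, last = first if first else cidr.first, cidr.last
        if first:
            ranges.append(netaddr.IPRange(first, last))
        # -> [IPRange('10.0.0.2', '10.0.0.3'), IPRange('10.0.0.6', '10.0.0.10')]
        return ranges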
@staticmethod
def _allocate_specific_ip(context, subnet_id, ip_address):
"""Allocate a specific IP address on the subnet."""
ip = int(netaddr.IPAddress(ip_address))
range_qry = context.session.query(
models_v2.IPAvailabilityRange).join(
models_v2.IPAllocationPool).with_lockmode('update')
results = range_qry.filter_by(subnet_id=subnet_id)
for range in results:
first = int(netaddr.IPAddress(range['first_ip']))
last = int(netaddr.IPAddress(range['last_ip']))
if first <= ip <= last:
if first == last:
context.session.delete(range)
return
elif first == ip:
range['first_ip'] = str(netaddr.IPAddress(ip_address) + 1)
return
elif last == ip:
range['last_ip'] = str(netaddr.IPAddress(ip_address) - 1)
return
else:
# Split into two ranges
new_first = str(netaddr.IPAddress(ip_address) + 1)
new_last = range['last_ip']
range['last_ip'] = str(netaddr.IPAddress(ip_address) - 1)
ip_range = models_v2.IPAvailabilityRange(
allocation_pool_id=range['allocation_pool_id'],
first_ip=new_first,
last_ip=new_last)
context.session.add(ip_range)
return
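    # --- Hedged example (not part of the original Neutron module) -----------
    # A sketch of the "split into two ranges" case above on a plain dict
    # instead of an IPAvailabilityRange row: carving 10.0.0.5 out of
    # 10.0.0.2-10.0.0.10 leaves 10.0.0.2-10.0.0.4 and 10.0.0.6-10.0.0.10.
    # The addresses are invented for illustration.
    @staticmethod
    def _example_split_range_sketch(ip_address='10.0.0.5'):
        range_row = {'first_ip': '10.0.0.2', 'last_ip': '10.0.0.10'}
        ip = int(netaddr.IPAddress(ip_address))
        first = int(netaddr.IPAddress(range_row['first_ip']))
        last = int(netaddr.IPAddress(range_row['last_ip']))
        assert first < ip < last
        new_range = {'first_ip': str(netaddr.IPAddress(ip_address) + 1),
                     'last_ip': range_row['last_ip']}
        range_row['last_ip'] = str(netaddr.IPAddress(ip_address) - 1)
        # -> ({'first_ip': '10.0.0.2', 'last_ip': '10.0.0.4'},
        #     {'first_ip': '10.0.0.6', 'last_ip': '10.0.0.10'})
        return range_row, new_range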
@staticmethod
def _check_unique_ip(context, network_id, subnet_id, ip_address):
"""Validate that the IP address on the subnet is not in use."""
ip_qry = context.session.query(models_v2.IPAllocation)
try:
ip_qry.filter_by(network_id=network_id,
subnet_id=subnet_id,
ip_address=ip_address).one()
except exc.NoResultFound:
return True
return False
@staticmethod
def _check_subnet_ip(cidr, ip_address):
"""Validate that the IP address is on the subnet."""
ip = netaddr.IPAddress(ip_address)
net = netaddr.IPNetwork(cidr)
# Check that the IP is valid on subnet. This cannot be the
# network or the broadcast address
if (ip != net.network and
ip != net.broadcast and
net.netmask & ip == net.network):
return True
return False
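    # --- Hedged example (not part of the original Neutron module) -----------
    # What _check_subnet_ip() accepts for an invented 10.0.0.0/24 subnet:
    # a host address passes, while the network address, the broadcast
    # address and an off-subnet address are all rejected.
    @staticmethod
    def _example_check_subnet_ip_sketch():
        cidr = '10.0.0.0/24'
        results = dict((addr, NeutronDbPluginV2._check_subnet_ip(cidr, addr))
                       for addr in ('10.0.0.5', '10.0.0.0',
                                    '10.0.0.255', '10.0.1.5'))
        # -> {'10.0.0.5': True, '10.0.0.0': False,
        #     '10.0.0.255': False, '10.0.1.5': False}
        return results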
@staticmethod
def _check_ip_in_allocation_pool(context, subnet_id, gateway_ip,
ip_address):
"""Validate IP in allocation pool.
Validates that the IP address is either the default gateway or
in the allocation pools of the subnet.
"""
# Check if the IP is the gateway
if ip_address == gateway_ip:
# Gateway is not in allocation pool
return False
# Check if the requested IP is in a defined allocation pool
pool_qry = context.session.query(models_v2.IPAllocationPool)
allocation_pools = pool_qry.filter_by(subnet_id=subnet_id)
ip = netaddr.IPAddress(ip_address)
for allocation_pool in allocation_pools:
allocation_pool_range = netaddr.IPRange(
allocation_pool['first_ip'],
allocation_pool['last_ip'])
if ip in allocation_pool_range:
return True
return False
def _test_fixed_ips_for_port(self, context, network_id, fixed_ips):
"""Test fixed IPs for port.
Check that configured subnets are valid prior to allocating any
IPs. Include the subnet_id in the result if only an IP address is
configured.
:raises: InvalidInput, IpAddressInUse
"""
fixed_ip_set = []
for fixed in fixed_ips:
found = False
if 'subnet_id' not in fixed:
if 'ip_address' not in fixed:
msg = _('IP allocation requires subnet_id or ip_address')
raise n_exc.InvalidInput(error_message=msg)
filter = {'network_id': [network_id]}
subnets = self.get_subnets(context, filters=filter)
for subnet in subnets:
if NeutronDbPluginV2._check_subnet_ip(subnet['cidr'],
fixed['ip_address']):
found = True
subnet_id = subnet['id']
break
if not found:
                    msg = _('IP address %s is not a valid IP for the defined '
                            "network's subnets") % fixed['ip_address']
raise n_exc.InvalidInput(error_message=msg)
else:
subnet = self._get_subnet(context, fixed['subnet_id'])
if subnet['network_id'] != network_id:
msg = (_("Failed to create port on network %(network_id)s"
", because fixed_ips included invalid subnet "
"%(subnet_id)s") %
{'network_id': network_id,
'subnet_id': fixed['subnet_id']})
raise n_exc.InvalidInput(error_message=msg)
subnet_id = subnet['id']
if 'ip_address' in fixed:
# Ensure that the IP's are unique
if not NeutronDbPluginV2._check_unique_ip(context, network_id,
subnet_id,
fixed['ip_address']):
raise n_exc.IpAddressInUse(net_id=network_id,
ip_address=fixed['ip_address'])
# Ensure that the IP is valid on the subnet
if (not found and
not NeutronDbPluginV2._check_subnet_ip(
subnet['cidr'], fixed['ip_address'])):
msg = _('IP address %s is not a valid IP for the defined '
'subnet') % fixed['ip_address']
raise n_exc.InvalidInput(error_message=msg)
fixed_ip_set.append({'subnet_id': subnet_id,
'ip_address': fixed['ip_address']})
else:
fixed_ip_set.append({'subnet_id': subnet_id})
if len(fixed_ip_set) > cfg.CONF.max_fixed_ips_per_port:
            msg = _('Exceeded maximum amount of fixed ips per port')
raise n_exc.InvalidInput(error_message=msg)
return fixed_ip_set
def _allocate_fixed_ips(self, context, network, fixed_ips):
"""Allocate IP addresses according to the configured fixed_ips."""
ips = []
for fixed in fixed_ips:
if 'ip_address' in fixed:
# Remove the IP address from the allocation pool
NeutronDbPluginV2._allocate_specific_ip(
context, fixed['subnet_id'], fixed['ip_address'])
ips.append({'ip_address': fixed['ip_address'],
'subnet_id': fixed['subnet_id']})
# Only subnet ID is specified => need to generate IP
# from subnet
else:
subnets = [self._get_subnet(context, fixed['subnet_id'])]
# IP address allocation
result = self._generate_ip(context, subnets)
ips.append({'ip_address': result['ip_address'],
'subnet_id': result['subnet_id']})
return ips
def _update_ips_for_port(self, context, network_id, port_id, original_ips,
new_ips):
"""Add or remove IPs from the port."""
ips = []
# These ips are still on the port and haven't been removed
prev_ips = []
# the new_ips contain all of the fixed_ips that are to be updated
if len(new_ips) > cfg.CONF.max_fixed_ips_per_port:
            msg = _('Exceeded maximum amount of fixed ips per port')
raise n_exc.InvalidInput(error_message=msg)
# Remove all of the intersecting elements
for original_ip in original_ips[:]:
for new_ip in new_ips[:]:
if ('ip_address' in new_ip and
original_ip['ip_address'] == new_ip['ip_address']):
original_ips.remove(original_ip)
new_ips.remove(new_ip)
prev_ips.append(original_ip)
# Check if the IP's to add are OK
to_add = self._test_fixed_ips_for_port(context, network_id, new_ips)
for ip in original_ips:
LOG.debug(_("Port update. Hold %s"), ip)
NeutronDbPluginV2._delete_ip_allocation(context,
network_id,
ip['subnet_id'],
ip['ip_address'])
if to_add:
LOG.debug(_("Port update. Adding %s"), to_add)
network = self._get_network(context, network_id)
ips = self._allocate_fixed_ips(context, network, to_add)
return ips, prev_ips
def _allocate_ips_for_port(self, context, network, port):
"""Allocate IP addresses for the port.
If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP
addresses for the port. If port['fixed_ips'] contains an IP address or
a subnet_id then allocate an IP address accordingly.
"""
p = port['port']
ips = []
fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED
if fixed_configured:
configured_ips = self._test_fixed_ips_for_port(context,
p["network_id"],
p['fixed_ips'])
ips = self._allocate_fixed_ips(context, network, configured_ips)
else:
filter = {'network_id': [p['network_id']]}
subnets = self.get_subnets(context, filters=filter)
# Split into v4 and v6 subnets
v4 = []
v6 = []
for subnet in subnets:
if subnet['ip_version'] == 4:
v4.append(subnet)
else:
v6.append(subnet)
version_subnets = [v4, v6]
for subnets in version_subnets:
if subnets:
result = NeutronDbPluginV2._generate_ip(context, subnets)
ips.append({'ip_address': result['ip_address'],
'subnet_id': result['subnet_id']})
return ips
def _validate_subnet_cidr(self, context, network, new_subnet_cidr):
"""Validate the CIDR for a subnet.
Verifies the specified CIDR does not overlap with the ones defined
for the other subnets specified for this network, or with any other
CIDR if overlapping IPs are disabled.
"""
new_subnet_ipset = netaddr.IPSet([new_subnet_cidr])
if cfg.CONF.allow_overlapping_ips:
subnet_list = network.subnets
else:
subnet_list = self._get_all_subnets(context)
for subnet in subnet_list:
if (netaddr.IPSet([subnet.cidr]) & new_subnet_ipset):
# don't give out details of the overlapping subnet
err_msg = (_("Requested subnet with cidr: %(cidr)s for "
"network: %(network_id)s overlaps with another "
"subnet") %
{'cidr': new_subnet_cidr,
'network_id': network.id})
LOG.info(_("Validation for CIDR: %(new_cidr)s failed - "
"overlaps with subnet %(subnet_id)s "
"(CIDR: %(cidr)s)"),
{'new_cidr': new_subnet_cidr,
'subnet_id': subnet.id,
'cidr': subnet.cidr})
raise n_exc.InvalidInput(error_message=err_msg)
def _validate_allocation_pools(self, ip_pools, subnet_cidr):
"""Validate IP allocation pools.
        Verify that the start and end addresses of each allocation pool are
        valid, i.e. constituted by valid and appropriately ordered IP
        addresses. Also verify that pools do not overlap among themselves.
        Finally, verify that each range falls within the subnet's CIDR.
"""
subnet = netaddr.IPNetwork(subnet_cidr)
subnet_first_ip = netaddr.IPAddress(subnet.first + 1)
subnet_last_ip = netaddr.IPAddress(subnet.last - 1)
LOG.debug(_("Performing IP validity checks on allocation pools"))
ip_sets = []
for ip_pool in ip_pools:
try:
start_ip = netaddr.IPAddress(ip_pool['start'])
end_ip = netaddr.IPAddress(ip_pool['end'])
except netaddr.AddrFormatError:
LOG.info(_("Found invalid IP address in pool: "
"%(start)s - %(end)s:"),
{'start': ip_pool['start'],
'end': ip_pool['end']})
raise n_exc.InvalidAllocationPool(pool=ip_pool)
if (start_ip.version != subnet.version or
end_ip.version != subnet.version):
LOG.info(_("Specified IP addresses do not match "
"the subnet IP version"))
raise n_exc.InvalidAllocationPool(pool=ip_pool)
if end_ip < start_ip:
LOG.info(_("Start IP (%(start)s) is greater than end IP "
"(%(end)s)"),
{'start': ip_pool['start'], 'end': ip_pool['end']})
raise n_exc.InvalidAllocationPool(pool=ip_pool)
if start_ip < subnet_first_ip or end_ip > subnet_last_ip:
LOG.info(_("Found pool larger than subnet "
"CIDR:%(start)s - %(end)s"),
{'start': ip_pool['start'],
'end': ip_pool['end']})
raise n_exc.OutOfBoundsAllocationPool(
pool=ip_pool,
subnet_cidr=subnet_cidr)
# Valid allocation pool
# Create an IPSet for it for easily verifying overlaps
ip_sets.append(netaddr.IPSet(netaddr.IPRange(
ip_pool['start'],
ip_pool['end']).cidrs()))
LOG.debug(_("Checking for overlaps among allocation pools "
"and gateway ip"))
ip_ranges = ip_pools[:]
# Use integer cursors as an efficient way for implementing
# comparison and avoiding comparing the same pair twice
for l_cursor in range(len(ip_sets)):
for r_cursor in range(l_cursor + 1, len(ip_sets)):
if ip_sets[l_cursor] & ip_sets[r_cursor]:
l_range = ip_ranges[l_cursor]
r_range = ip_ranges[r_cursor]
LOG.info(_("Found overlapping ranges: %(l_range)s and "
"%(r_range)s"),
{'l_range': l_range, 'r_range': r_range})
raise n_exc.OverlappingAllocationPools(
pool_1=l_range,
pool_2=r_range,
subnet_cidr=subnet_cidr)
def _validate_host_route(self, route, ip_version):
try:
netaddr.IPNetwork(route['destination'])
netaddr.IPAddress(route['nexthop'])
except netaddr.core.AddrFormatError:
err_msg = _("Invalid route: %s") % route
raise n_exc.InvalidInput(error_message=err_msg)
except ValueError:
# netaddr.IPAddress would raise this
err_msg = _("Invalid route: %s") % route
raise n_exc.InvalidInput(error_message=err_msg)
self._validate_ip_version(ip_version, route['nexthop'], 'nexthop')
self._validate_ip_version(ip_version, route['destination'],
'destination')
def _allocate_pools_for_subnet(self, context, subnet):
"""Create IP allocation pools for a given subnet
Pools are defined by the 'allocation_pools' attribute,
a list of dict objects with 'start' and 'end' keys for
defining the pool range.
"""
pools = []
# Auto allocate the pool around gateway_ip
net = netaddr.IPNetwork(subnet['cidr'])
first_ip = net.first + 1
last_ip = net.last - 1
gw_ip = int(netaddr.IPAddress(subnet['gateway_ip'] or net.last))
# Use the gw_ip to find a point for splitting allocation pools
# for this subnet
split_ip = min(max(gw_ip, net.first), net.last)
if split_ip > first_ip:
pools.append({'start': str(netaddr.IPAddress(first_ip)),
'end': str(netaddr.IPAddress(split_ip - 1))})
if split_ip < last_ip:
pools.append({'start': str(netaddr.IPAddress(split_ip + 1)),
'end': str(netaddr.IPAddress(last_ip))})
# return auto-generated pools
# no need to check for their validity
return pools
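    # --- Hedged example (not part of the original Neutron module) -----------
    # A worked example of the auto-allocation above for an invented subnet:
    # with cidr 10.0.0.0/24 and gateway 10.0.0.1 the usable addresses are
    # split around the gateway, leaving a single pool 10.0.0.2 - 10.0.0.254.
    @staticmethod
    def _example_allocate_pools_sketch(cidr='10.0.0.0/24',
                                       gateway_ip='10.0.0.1'):
        net = netaddr.IPNetwork(cidr)
        first_ip, last_ip = net.first + 1, net.last - 1
        split_ip = min(max(int(netaddr.IPAddress(gateway_ip)), net.first),
                       net.last)
        pools = []
        if split_ip > first_ip:
            pools.append({'start': str(netaddr.IPAddress(first_ip)),
                          'end': str(netaddr.IPAddress(split_ip - 1))})
        if split_ip < last_ip:
            pools.append({'start': str(netaddr.IPAddress(split_ip + 1)),
                          'end': str(netaddr.IPAddress(last_ip))})
        # -> [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
        return pools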
def _validate_shared_update(self, context, id, original, updated):
# The only case that needs to be validated is when 'shared'
# goes from True to False
if updated['shared'] == original.shared or updated['shared']:
return
ports = self._model_query(
context, models_v2.Port).filter(
models_v2.Port.network_id == id)
subnets = self._model_query(
context, models_v2.Subnet).filter(
models_v2.Subnet.network_id == id)
tenant_ids = set([port['tenant_id'] for port in ports] +
[subnet['tenant_id'] for subnet in subnets])
# raise if multiple tenants found or if the only tenant found
# is not the owner of the network
if (len(tenant_ids) > 1 or len(tenant_ids) == 1 and
tenant_ids.pop() != original.tenant_id):
raise n_exc.InvalidSharedSetting(network=original.name)
def _validate_ipv6_attributes(self, subnet, cur_subnet):
ra_mode_set = attributes.is_attr_set(subnet.get('ipv6_ra_mode'))
address_mode_set = attributes.is_attr_set(
subnet.get('ipv6_address_mode'))
if cur_subnet:
ra_mode = (subnet['ipv6_ra_mode'] if ra_mode_set
else cur_subnet['ipv6_ra_mode'])
addr_mode = (subnet['ipv6_address_mode'] if address_mode_set
else cur_subnet['ipv6_address_mode'])
if ra_mode_set or address_mode_set:
# Check that updated subnet ipv6 attributes do not conflict
self._validate_ipv6_combination(ra_mode, addr_mode)
self._validate_ipv6_update_dhcp(subnet, cur_subnet)
else:
self._validate_ipv6_dhcp(ra_mode_set, address_mode_set,
subnet['enable_dhcp'])
if ra_mode_set and address_mode_set:
self._validate_ipv6_combination(subnet['ipv6_ra_mode'],
subnet['ipv6_address_mode'])
def _validate_ipv6_combination(self, ra_mode, address_mode):
if ra_mode != address_mode:
msg = _("ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode "
"set to '%(addr_mode)s' is not valid. "
"If both attributes are set, they must be the same value"
) % {'ra_mode': ra_mode, 'addr_mode': address_mode}
raise n_exc.InvalidInput(error_message=msg)
def _validate_ipv6_dhcp(self, ra_mode_set, address_mode_set, enable_dhcp):
if (ra_mode_set or address_mode_set) and not enable_dhcp:
msg = _("ipv6_ra_mode or ipv6_address_mode cannot be set when "
"enable_dhcp is set to False.")
raise n_exc.InvalidInput(error_message=msg)
def _validate_ipv6_update_dhcp(self, subnet, cur_subnet):
if ('enable_dhcp' in subnet and not subnet['enable_dhcp']):
msg = _("Cannot disable enable_dhcp with "
"ipv6 attributes set")
ra_mode_set = attributes.is_attr_set(subnet.get('ipv6_ra_mode'))
address_mode_set = attributes.is_attr_set(
subnet.get('ipv6_address_mode'))
if ra_mode_set or address_mode_set:
raise n_exc.InvalidInput(error_message=msg)
old_ra_mode_set = attributes.is_attr_set(
cur_subnet.get('ipv6_ra_mode'))
old_address_mode_set = attributes.is_attr_set(
cur_subnet.get('ipv6_address_mode'))
if old_ra_mode_set or old_address_mode_set:
raise n_exc.InvalidInput(error_message=msg)
def _make_network_dict(self, network, fields=None,
process_extensions=True):
res = {'id': network['id'],
'name': network['name'],
'tenant_id': network['tenant_id'],
'admin_state_up': network['admin_state_up'],
'status': network['status'],
'shared': network['shared'],
'subnets': [subnet['id']
for subnet in network['subnets']]}
# Call auxiliary extend functions, if any
if process_extensions:
self._apply_dict_extend_functions(
attributes.NETWORKS, res, network)
return self._fields(res, fields)
def _make_subnet_dict(self, subnet, fields=None):
res = {'id': subnet['id'],
'name': subnet['name'],
'tenant_id': subnet['tenant_id'],
'network_id': subnet['network_id'],
'ip_version': subnet['ip_version'],
'cidr': subnet['cidr'],
'allocation_pools': [{'start': pool['first_ip'],
'end': pool['last_ip']}
for pool in subnet['allocation_pools']],
'gateway_ip': subnet['gateway_ip'],
'enable_dhcp': subnet['enable_dhcp'],
'ipv6_ra_mode': subnet['ipv6_ra_mode'],
'ipv6_address_mode': subnet['ipv6_address_mode'],
'dns_nameservers': [dns['address']
for dns in subnet['dns_nameservers']],
'host_routes': [{'destination': route['destination'],
'nexthop': route['nexthop']}
for route in subnet['routes']],
'shared': subnet['shared']
}
return self._fields(res, fields)
def _make_port_dict(self, port, fields=None,
process_extensions=True):
res = {"id": port["id"],
'name': port['name'],
"network_id": port["network_id"],
'tenant_id': port['tenant_id'],
"mac_address": port["mac_address"],
"admin_state_up": port["admin_state_up"],
"status": port["status"],
"fixed_ips": [{'subnet_id': ip["subnet_id"],
'ip_address': ip["ip_address"]}
for ip in port["fixed_ips"]],
"device_id": port["device_id"],
"device_owner": port["device_owner"]}
# Call auxiliary extend functions, if any
if process_extensions:
self._apply_dict_extend_functions(
attributes.PORTS, res, port)
return self._fields(res, fields)
def _create_bulk(self, resource, context, request_items):
objects = []
collection = "%ss" % resource
items = request_items[collection]
context.session.begin(subtransactions=True)
try:
for item in items:
obj_creator = getattr(self, 'create_%s' % resource)
objects.append(obj_creator(context, item))
context.session.commit()
except Exception:
context.session.rollback()
with excutils.save_and_reraise_exception():
LOG.error(_("An exception occurred while creating "
"the %(resource)s:%(item)s"),
{'resource': resource, 'item': item})
return objects
def create_network_bulk(self, context, networks):
return self._create_bulk('network', context, networks)
def create_network(self, context, network):
"""Handle creation of a single network."""
# single request processing
n = network['network']
# NOTE(jkoelker) Get the tenant_id outside of the session to avoid
# unneeded db action if the operation raises
tenant_id = self._get_tenant_id_for_create(context, n)
with context.session.begin(subtransactions=True):
args = {'tenant_id': tenant_id,
'id': n.get('id') or uuidutils.generate_uuid(),
'name': n['name'],
'admin_state_up': n['admin_state_up'],
'shared': n['shared'],
'status': n.get('status', constants.NET_STATUS_ACTIVE)}
network = models_v2.Network(**args)
context.session.add(network)
return self._make_network_dict(network, process_extensions=False)
def update_network(self, context, id, network):
n = network['network']
with context.session.begin(subtransactions=True):
network = self._get_network(context, id)
# validate 'shared' parameter
if 'shared' in n:
self._validate_shared_update(context, id, network, n)
network.update(n)
# also update shared in all the subnets for this network
subnets = self._get_subnets_by_network(context, id)
for subnet in subnets:
subnet['shared'] = network['shared']
return self._make_network_dict(network)
def delete_network(self, context, id):
with context.session.begin(subtransactions=True):
network = self._get_network(context, id)
filters = {'network_id': [id]}
# NOTE(armando-migliaccio): stick with base plugin
query = context.session.query(
models_v2.Port).enable_eagerloads(False)
ports = self._apply_filters_to_query(
query, models_v2.Port, filters).with_lockmode('update')
# check if there are any tenant owned ports in-use
only_auto_del = all(p['device_owner'] in AUTO_DELETE_PORT_OWNERS
for p in ports)
if not only_auto_del:
raise n_exc.NetworkInUse(net_id=id)
# clean up network owned ports
for port in ports:
self._delete_port(context, port['id'])
# clean up subnets
subnets_qry = context.session.query(models_v2.Subnet)
subnets_qry.filter_by(network_id=id).delete()
context.session.delete(network)
def get_network(self, context, id, fields=None):
network = self._get_network(context, id)
return self._make_network_dict(network, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'network', limit, marker)
return self._get_collection(context, models_v2.Network,
self._make_network_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def get_networks_count(self, context, filters=None):
return self._get_collection_count(context, models_v2.Network,
filters=filters)
def create_subnet_bulk(self, context, subnets):
return self._create_bulk('subnet', context, subnets)
def _validate_ip_version(self, ip_version, addr, name):
"""Check IP field of a subnet match specified ip version."""
ip = netaddr.IPNetwork(addr)
if ip.version != ip_version:
data = {'name': name,
'addr': addr,
'ip_version': ip_version}
msg = _("%(name)s '%(addr)s' does not match "
"the ip_version '%(ip_version)s'") % data
raise n_exc.InvalidInput(error_message=msg)
def _validate_subnet(self, context, s, cur_subnet=None):
"""Validate a subnet spec."""
# This method will validate attributes which may change during
# create_subnet() and update_subnet().
# The method requires the subnet spec 's' has 'ip_version' field.
# If 's' dict does not have 'ip_version' field in an API call
# (e.g., update_subnet()), you need to set 'ip_version' field
# before calling this method.
ip_ver = s['ip_version']
if 'cidr' in s:
self._validate_ip_version(ip_ver, s['cidr'], 'cidr')
if attributes.is_attr_set(s.get('gateway_ip')):
self._validate_ip_version(ip_ver, s['gateway_ip'], 'gateway_ip')
if (cfg.CONF.force_gateway_on_subnet and
not NeutronDbPluginV2._check_subnet_ip(s['cidr'],
s['gateway_ip'])):
error_message = _("Gateway is not valid on subnet")
raise n_exc.InvalidInput(error_message=error_message)
# Ensure the gateway IP is not assigned to any port
# skip this check in case of create (s parameter won't have id)
# NOTE(salv-orlando): There is slight chance of a race, when
# a subnet-update and a router-interface-add operation are
# executed concurrently
if cur_subnet:
alloc_qry = context.session.query(models_v2.IPAllocation)
allocated = alloc_qry.filter_by(
ip_address=cur_subnet['gateway_ip'],
subnet_id=cur_subnet['id']).first()
if allocated and allocated['port_id']:
raise n_exc.GatewayIpInUse(
ip_address=cur_subnet['gateway_ip'],
port_id=allocated['port_id'])
if attributes.is_attr_set(s.get('dns_nameservers')):
if len(s['dns_nameservers']) > cfg.CONF.max_dns_nameservers:
raise n_exc.DNSNameServersExhausted(
subnet_id=s.get('id', _('new subnet')),
quota=cfg.CONF.max_dns_nameservers)
for dns in s['dns_nameservers']:
try:
netaddr.IPAddress(dns)
except Exception:
raise n_exc.InvalidInput(
error_message=(_("Error parsing dns address %s") %
dns))
self._validate_ip_version(ip_ver, dns, 'dns_nameserver')
if attributes.is_attr_set(s.get('host_routes')):
if len(s['host_routes']) > cfg.CONF.max_subnet_host_routes:
raise n_exc.HostRoutesExhausted(
subnet_id=s.get('id', _('new subnet')),
quota=cfg.CONF.max_subnet_host_routes)
# check if the routes are all valid
for rt in s['host_routes']:
self._validate_host_route(rt, ip_ver)
if ip_ver == 6:
self._validate_ipv6_attributes(s, cur_subnet)
def _validate_gw_out_of_pools(self, gateway_ip, pools):
for allocation_pool in pools:
pool_range = netaddr.IPRange(
allocation_pool['start'],
allocation_pool['end'])
if netaddr.IPAddress(gateway_ip) in pool_range:
raise n_exc.GatewayConflictWithAllocationPools(
pool=pool_range,
ip_address=gateway_ip)
def create_subnet(self, context, subnet):
net = netaddr.IPNetwork(subnet['subnet']['cidr'])
# turn the CIDR into a proper subnet
subnet['subnet']['cidr'] = '%s/%s' % (net.network, net.prefixlen)
s = subnet['subnet']
if s['gateway_ip'] is attributes.ATTR_NOT_SPECIFIED:
s['gateway_ip'] = str(netaddr.IPAddress(net.first + 1))
if s['allocation_pools'] == attributes.ATTR_NOT_SPECIFIED:
s['allocation_pools'] = self._allocate_pools_for_subnet(context, s)
else:
self._validate_allocation_pools(s['allocation_pools'], s['cidr'])
if s['gateway_ip'] is not None:
self._validate_gw_out_of_pools(s['gateway_ip'],
s['allocation_pools'])
self._validate_subnet(context, s)
tenant_id = self._get_tenant_id_for_create(context, s)
with context.session.begin(subtransactions=True):
network = self._get_network(context, s["network_id"])
self._validate_subnet_cidr(context, network, s['cidr'])
# The 'shared' attribute for subnets is for internal plugin
# use only. It is not exposed through the API
args = {'tenant_id': tenant_id,
'id': s.get('id') or uuidutils.generate_uuid(),
'name': s['name'],
'network_id': s['network_id'],
'ip_version': s['ip_version'],
'cidr': s['cidr'],
'enable_dhcp': s['enable_dhcp'],
'gateway_ip': s['gateway_ip'],
'shared': network.shared}
if s['ip_version'] == 6 and s['enable_dhcp']:
if attributes.is_attr_set(s['ipv6_ra_mode']):
args['ipv6_ra_mode'] = s['ipv6_ra_mode']
if attributes.is_attr_set(s['ipv6_address_mode']):
args['ipv6_address_mode'] = s['ipv6_address_mode']
subnet = models_v2.Subnet(**args)
context.session.add(subnet)
if s['dns_nameservers'] is not attributes.ATTR_NOT_SPECIFIED:
for addr in s['dns_nameservers']:
ns = models_v2.DNSNameServer(address=addr,
subnet_id=subnet.id)
context.session.add(ns)
if s['host_routes'] is not attributes.ATTR_NOT_SPECIFIED:
for rt in s['host_routes']:
route = models_v2.SubnetRoute(
subnet_id=subnet.id,
destination=rt['destination'],
nexthop=rt['nexthop'])
context.session.add(route)
for pool in s['allocation_pools']:
ip_pool = models_v2.IPAllocationPool(subnet=subnet,
first_ip=pool['start'],
last_ip=pool['end'])
context.session.add(ip_pool)
ip_range = models_v2.IPAvailabilityRange(
ipallocationpool=ip_pool,
first_ip=pool['start'],
last_ip=pool['end'])
context.session.add(ip_range)
return self._make_subnet_dict(subnet)
def update_subnet(self, context, id, subnet):
"""Update the subnet with new info.
        The change, however, will not be realized until the client renews its
        DNS lease or we support gratuitous DHCP offers.
"""
s = subnet['subnet']
changed_host_routes = False
changed_dns = False
db_subnet = self._get_subnet(context, id)
# Fill 'ip_version' and 'allocation_pools' fields with the current
# value since _validate_subnet() expects subnet spec has 'ip_version'
# and 'allocation_pools' fields.
s['ip_version'] = db_subnet.ip_version
s['cidr'] = db_subnet.cidr
s['id'] = db_subnet.id
self._validate_subnet(context, s, cur_subnet=db_subnet)
if 'gateway_ip' in s and s['gateway_ip'] is not None:
allocation_pools = [{'start': p['first_ip'], 'end': p['last_ip']}
for p in db_subnet.allocation_pools]
self._validate_gw_out_of_pools(s["gateway_ip"], allocation_pools)
with context.session.begin(subtransactions=True):
if "dns_nameservers" in s:
changed_dns = True
old_dns_list = self._get_dns_by_subnet(context, id)
new_dns_addr_set = set(s["dns_nameservers"])
old_dns_addr_set = set([dns['address']
for dns in old_dns_list])
new_dns = list(new_dns_addr_set)
for dns_addr in old_dns_addr_set - new_dns_addr_set:
for dns in old_dns_list:
if dns['address'] == dns_addr:
context.session.delete(dns)
for dns_addr in new_dns_addr_set - old_dns_addr_set:
dns = models_v2.DNSNameServer(
address=dns_addr,
subnet_id=id)
context.session.add(dns)
del s["dns_nameservers"]
def _combine(ht):
return ht['destination'] + "_" + ht['nexthop']
if "host_routes" in s:
changed_host_routes = True
old_route_list = self._get_route_by_subnet(context, id)
new_route_set = set([_combine(route)
for route in s['host_routes']])
old_route_set = set([_combine(route)
for route in old_route_list])
for route_str in old_route_set - new_route_set:
for route in old_route_list:
if _combine(route) == route_str:
context.session.delete(route)
for route_str in new_route_set - old_route_set:
route = models_v2.SubnetRoute(
destination=route_str.partition("_")[0],
nexthop=route_str.partition("_")[2],
subnet_id=id)
context.session.add(route)
# Gather host routes for result
new_routes = []
for route_str in new_route_set:
new_routes.append(
{'destination': route_str.partition("_")[0],
'nexthop': route_str.partition("_")[2]})
del s["host_routes"]
subnet = self._get_subnet(context, id)
subnet.update(s)
result = self._make_subnet_dict(subnet)
# Keep up with fields that changed
if changed_dns:
result['dns_nameservers'] = new_dns
if changed_host_routes:
result['host_routes'] = new_routes
return result
def delete_subnet(self, context, id):
with context.session.begin(subtransactions=True):
subnet = self._get_subnet(context, id)
# Check if any tenant owned ports are using this subnet
allocated = (context.session.query(models_v2.IPAllocation).
filter_by(subnet_id=subnet['id']).
join(models_v2.Port).
filter_by(network_id=subnet['network_id']).
with_lockmode('update'))
# remove network owned ports
for a in allocated:
if a.ports.device_owner in AUTO_DELETE_PORT_OWNERS:
NeutronDbPluginV2._delete_ip_allocation(
context, subnet.network_id, id, a.ip_address)
else:
raise n_exc.SubnetInUse(subnet_id=id)
context.session.delete(subnet)
def get_subnet(self, context, id, fields=None):
subnet = self._get_subnet(context, id)
return self._make_subnet_dict(subnet, fields)
def get_subnets(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'subnet', limit, marker)
return self._get_collection(context, models_v2.Subnet,
self._make_subnet_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def get_subnets_count(self, context, filters=None):
return self._get_collection_count(context, models_v2.Subnet,
filters=filters)
def create_port_bulk(self, context, ports):
return self._create_bulk('port', context, ports)
def create_port(self, context, port):
p = port['port']
port_id = p.get('id') or uuidutils.generate_uuid()
network_id = p['network_id']
mac_address = p['mac_address']
# NOTE(jkoelker) Get the tenant_id outside of the session to avoid
# unneeded db action if the operation raises
tenant_id = self._get_tenant_id_for_create(context, p)
if p.get('device_owner') == constants.DEVICE_OWNER_ROUTER_INTF:
self._enforce_device_owner_not_router_intf_or_device_id(context, p,
tenant_id)
with context.session.begin(subtransactions=True):
network = self._get_network(context, network_id)
# Ensure that a MAC address is defined and it is unique on the
# network
if mac_address is attributes.ATTR_NOT_SPECIFIED:
mac_address = NeutronDbPluginV2._generate_mac(context,
network_id)
else:
# Ensure that the mac on the network is unique
if not NeutronDbPluginV2._check_unique_mac(context,
network_id,
mac_address):
raise n_exc.MacAddressInUse(net_id=network_id,
mac=mac_address)
# Returns the IP's for the port
ips = self._allocate_ips_for_port(context, network, port)
if 'status' not in p:
status = constants.PORT_STATUS_ACTIVE
else:
status = p['status']
port = models_v2.Port(tenant_id=tenant_id,
name=p['name'],
id=port_id,
network_id=network_id,
mac_address=mac_address,
admin_state_up=p['admin_state_up'],
status=status,
device_id=p['device_id'],
device_owner=p['device_owner'])
context.session.add(port)
# Update the allocated IP's
if ips:
for ip in ips:
ip_address = ip['ip_address']
subnet_id = ip['subnet_id']
LOG.debug(_("Allocated IP %(ip_address)s "
"(%(network_id)s/%(subnet_id)s/%(port_id)s)"),
{'ip_address': ip_address,
'network_id': network_id,
'subnet_id': subnet_id,
'port_id': port_id})
allocated = models_v2.IPAllocation(
network_id=network_id,
port_id=port_id,
ip_address=ip_address,
subnet_id=subnet_id,
)
context.session.add(allocated)
return self._make_port_dict(port, process_extensions=False)
def update_port(self, context, id, port):
p = port['port']
changed_ips = False
with context.session.begin(subtransactions=True):
port = self._get_port(context, id)
if 'device_owner' in p:
current_device_owner = p['device_owner']
changed_device_owner = True
else:
current_device_owner = port['device_owner']
changed_device_owner = False
            if p.get('device_id') != port['device_id']:
                changed_device_id = True
            else:
                changed_device_id = False
            # if the current device_owner is ROUTER_INTF and the device_id
            # or device_owner changed, check that device_id is not another
            # tenant's router
if ((current_device_owner == constants.DEVICE_OWNER_ROUTER_INTF)
and (changed_device_id or changed_device_owner)):
self._enforce_device_owner_not_router_intf_or_device_id(
context, p, port['tenant_id'], port)
# Check if the IPs need to be updated
if 'fixed_ips' in p:
changed_ips = True
original = self._make_port_dict(port, process_extensions=False)
added_ips, prev_ips = self._update_ips_for_port(
context, port["network_id"], id, original["fixed_ips"],
p['fixed_ips'])
# Update ips if necessary
for ip in added_ips:
allocated = models_v2.IPAllocation(
network_id=port['network_id'], port_id=port.id,
ip_address=ip['ip_address'], subnet_id=ip['subnet_id'])
context.session.add(allocated)
# Remove all attributes in p which are not in the port DB model
# and then update the port
port.update(self._filter_non_model_columns(p, models_v2.Port))
result = self._make_port_dict(port)
# Keep up with fields that changed
if changed_ips:
result['fixed_ips'] = prev_ips + added_ips
return result
def delete_port(self, context, id):
with context.session.begin(subtransactions=True):
self._delete_port(context, id)
def delete_ports_by_device_id(self, context, device_id, network_id=None):
query = (context.session.query(models_v2.Port.id)
.enable_eagerloads(False)
.filter(models_v2.Port.device_id == device_id))
if network_id:
query = query.filter(models_v2.Port.network_id == network_id)
port_ids = [p[0] for p in query]
for port_id in port_ids:
try:
self.delete_port(context, port_id)
except n_exc.PortNotFound:
# Don't raise if something else concurrently deleted the port
LOG.debug(_("Ignoring PortNotFound when deleting port '%s'. "
"The port has already been deleted."),
port_id)
def _delete_port(self, context, id):
query = (context.session.query(models_v2.Port).
enable_eagerloads(False).filter_by(id=id))
if not context.is_admin:
query = query.filter_by(tenant_id=context.tenant_id)
query.delete()
def get_port(self, context, id, fields=None):
port = self._get_port(context, id)
return self._make_port_dict(port, fields)
def _get_ports_query(self, context, filters=None, sorts=None, limit=None,
marker_obj=None, page_reverse=False):
Port = models_v2.Port
IPAllocation = models_v2.IPAllocation
if not filters:
filters = {}
query = self._model_query(context, Port)
fixed_ips = filters.pop('fixed_ips', {})
ip_addresses = fixed_ips.get('ip_address')
subnet_ids = fixed_ips.get('subnet_id')
if ip_addresses or subnet_ids:
query = query.join(Port.fixed_ips)
if ip_addresses:
query = query.filter(IPAllocation.ip_address.in_(ip_addresses))
if subnet_ids:
query = query.filter(IPAllocation.subnet_id.in_(subnet_ids))
query = self._apply_filters_to_query(query, Port, filters)
if limit and page_reverse and sorts:
sorts = [(s[0], not s[1]) for s in sorts]
query = sqlalchemyutils.paginate_query(query, Port, limit,
sorts, marker_obj)
return query
def get_ports(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'port', limit, marker)
query = self._get_ports_query(context, filters=filters,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
items = [self._make_port_dict(c, fields) for c in query]
if limit and page_reverse:
items.reverse()
return items
def get_ports_count(self, context, filters=None):
return self._get_ports_query(context, filters).count()
def _enforce_device_owner_not_router_intf_or_device_id(self, context,
port_request,
tenant_id,
db_port=None):
if not context.is_admin:
# find the device_id. If the call was update_port and the
# device_id was not passed in we use the device_id from the
# db.
device_id = port_request.get('device_id')
if not device_id and db_port:
device_id = db_port.get('device_id')
            # check to make sure device_id does not match another tenant's
            # router.
if device_id:
if hasattr(self, 'get_router'):
try:
ctx_admin = ctx.get_admin_context()
router = self.get_router(ctx_admin, device_id)
except l3.RouterNotFound:
return
else:
l3plugin = (
manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT))
if l3plugin:
try:
ctx_admin = ctx.get_admin_context()
router = l3plugin.get_router(ctx_admin,
device_id)
except l3.RouterNotFound:
return
else:
# raise as extension doesn't support L3 anyways.
raise n_exc.DeviceIDNotOwnedByTenant(
device_id=device_id)
if tenant_id != router['tenant_id']:
raise n_exc.DeviceIDNotOwnedByTenant(device_id=device_id)
|
{
"content_hash": "808ab3b2d2c05065eab19afc74737745",
"timestamp": "",
"source": "github",
"line_count": 1533,
"max_line_length": 79,
"avg_line_length": 45.90084801043705,
"alnum_prop": 0.5298155359122303,
"repo_name": "vijayendrabvs/hap",
"id": "0701ff8cc5eb798380b0eff84de77ddbded83e85",
"size": "71052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/db/db_base_plugin_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Python",
"bytes": "8801288"
},
{
"name": "Shell",
"bytes": "8920"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
import abc
class AbstractTypeRegistry:
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_namespaces(self):
raise NotImplementedError
@abc.abstractmethod
def get_types(self, namespace=None):
raise NotImplementedError
@abc.abstractmethod
def register_namespace(self, namespace=None):
raise NotImplementedError
@abc.abstractmethod
def update_spec(self, namespace=None, schema=None):
raise NotImplementedError
@abc.abstractmethod
def register_spec(self, namespace=None, schema=None):
raise NotImplementedError
@abc.abstractmethod
def get_spec_hash(self, schema=None):
raise NotImplementedError
@abc.abstractmethod
def get_registry_info(self, hash=None):
raise NotImplementedError
class AbstractTypeNamespaceAPI:
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_type_names(self):
raise NotImplementedError
@abc.abstractmethod
def get_type(self, type_name=None):
raise NotImplementedError
@abc.abstractmethod
def add_type(self, type_schema=None):
raise NotImplementedError
@abc.abstractmethod
def remove_type(self, type_schema=None):
raise NotImplementedError
@abc.abstractmethod
def update_type(self, type_name=None, type_schema=None):
raise NotImplementedError
@abc.abstractmethod
def publish_type(self, type_name=None):
raise NotImplementedError
@abc.abstractmethod
def unpublish_type(self, type_name=None):
raise NotImplementedError
@abc.abstractmethod
def add_user(self, type_schema=None):
raise NotImplementedError
@abc.abstractmethod
def update_user(self, type_schema=None):
raise NotImplementedError
@abc.abstractmethod
def remove_user(self, type_schema=None):
raise NotImplementedError
class AbstractTypeAPI:
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_schema(self):
raise NotImplementedError
@abc.abstractmethod
def set_schema(self, schema=None):
raise NotImplementedError
@abc.abstractmethod
def validate_schema(self, schema=None):
raise NotImplementedError
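# --- Hedged example (not part of the original module) -----------------------
# A minimal in-memory sketch of how AbstractTypeAPI is meant to be filled in.
# The class name and the dict-based schema check are invented for
# illustration; the real data_api implementations are more involved.
class InMemoryTypeAPI(AbstractTypeAPI):
    def __init__(self, schema=None):
        self._schema = schema
    def get_schema(self):
        return self._schema
    def set_schema(self, schema=None):
        self.validate_schema(schema)
        self._schema = schema
    def validate_schema(self, schema=None):
        if not isinstance(schema, dict):
            raise ValueError("schema must be a dict")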
|
{
"content_hash": "d932a0f06dbe712459aa7f82a3c5abe3",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 60,
"avg_line_length": 24.619565217391305,
"alnum_prop": 0.6874172185430464,
"repo_name": "scanon/data_api2",
"id": "a40ffa7adac37b738d63a75398a96134a66c8e22",
"size": "2265",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lib/doekbase/data_api/typesystem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "99761"
},
{
"name": "JavaScript",
"bytes": "19146"
},
{
"name": "Makefile",
"bytes": "2881"
},
{
"name": "Perl",
"bytes": "64635"
},
{
"name": "Python",
"bytes": "426978"
},
{
"name": "Shell",
"bytes": "1768"
}
],
"symlink_target": ""
}
|
from tinydb import TinyDB, where
from tinydb.database import Table
from tinydb.storages import MemoryStorage
import pytest
from tinydb_smartcache import SmartCacheTable
@pytest.fixture
def db_smartcache():
TinyDB.table_class = SmartCacheTable
db_ = TinyDB(storage=MemoryStorage)
table = db_.table('_default')
table.insert_multiple({'int': 1, 'char': c} for c in 'abc')
return table
@pytest.fixture
def db():
db_ = TinyDB(storage=MemoryStorage)
db_.drop_tables()
db_.insert_multiple({'int': 1, 'char': c} for c in 'abc')
return db_
def test_smart_query_cache(db_smartcache):
db = db_smartcache
query = where('int') == 1
dummy = where('int') == 2
assert len(db.search(query)) == 3
assert len(db.search(dummy)) == 0
assert len(db._query_cache[query]) == 3
assert len(db._query_cache[dummy]) == 0
db.truncate()
assert not db.search(query)
assert not db.search(dummy)
assert len(db._query_cache[query]) == 0
assert len(db._query_cache[dummy]) == 0
# Test insert
db.insert({'int': 1})
assert len(db._query_cache) == 2
assert len(db._query_cache[query]) == 1
assert len(db._query_cache[dummy]) == 0
# Test update
db.update({'int': 2}, where('int') == 1)
assert len(db._query_cache[query]) == 0
assert len(db._query_cache[dummy]) == 1
assert db.count(query) == 0
# Test remove
db.insert({'int': 1})
db.remove(where('int') == 1)
assert db.count(where('int') == 1) == 0
def test_custom_table_class_via_class_attribute(db):
TinyDB.table_class = SmartCacheTable
table = db.table('table3')
assert isinstance(table, SmartCacheTable)
TinyDB.table_class = Table
# def test_custom_table_class_via_instance_attribute(db):
# db.table_class = SmartCacheTable
# table = db.table('table3')
# assert isinstance(table, SmartCacheTable)
def test_truncate(db_smartcache):
db = db_smartcache
db.truncate()
db.insert({})
db.truncate()
assert len(db) == 0
def test_all(db_smartcache):
db = db_smartcache
db.truncate()
for i in range(10):
db.insert({})
assert len(db.all()) == 10
def test_insert(db_smartcache):
db = db_smartcache
db.truncate()
db.insert({'int': 1, 'char': 'a'})
assert db.count(where('int') == 1) == 1
db.truncate()
db.insert({'int': 1, 'char': 'a'})
db.insert({'int': 1, 'char': 'b'})
db.insert({'int': 1, 'char': 'c'})
assert db.count(where('int') == 1) == 3
assert db.count(where('char') == 'a') == 1
def test_insert_ids(db_smartcache):
db = db_smartcache
db.truncate()
assert db.insert({'int': 1, 'char': 'a'}) == 1
assert db.insert({'int': 1, 'char': 'a'}) == 2
def test_insert_multiple(db_smartcache):
db = db_smartcache
db.truncate()
assert not db.contains(where('int') == 1)
# Insert multiple from list
db.insert_multiple([{'int': 1, 'char': 'a'},
{'int': 1, 'char': 'b'},
{'int': 1, 'char': 'c'}])
assert db.count(where('int') == 1) == 3
assert db.count(where('char') == 'a') == 1
# Insert multiple from generator function
def generator():
for j in range(10):
yield {'int': j}
db.truncate()
db.insert_multiple(generator())
for i in range(10):
assert db.count(where('int') == i) == 1
if hasattr(where('int'), 'exists'):
assert db.count(where('int').exists()) == 10
else:
assert db.count(where('int')) == 10
# Insert multiple from inline generator
db.truncate()
db.insert_multiple({'int': i} for i in range(10))
for i in range(10):
assert db.count(where('int') == i) == 1
def test_insert_multiple_with_ids(db_smartcache):
db = db_smartcache
db.truncate()
# Insert multiple from list
assert db.insert_multiple([{'int': 1, 'char': 'a'},
{'int': 1, 'char': 'b'},
{'int': 1, 'char': 'c'}]) == [1, 2, 3]
def test_remove(db_smartcache):
db = db_smartcache
db.remove(where('char') == 'b')
assert len(db) == 2
assert db.count(where('int') == 1) == 2
def test_remove_multiple(db_smartcache):
db = db_smartcache
db.remove(where('int') == 1)
assert len(db) == 0
def test_remove_ids(db_smartcache):
db = db_smartcache
db.remove(doc_ids=[1, 2])
assert len(db) == 1
def test_update(db_smartcache):
db = db_smartcache
assert db.count(where('int') == 1) == 3
db.update({'int': 2}, where('char') == 'a')
assert db.count(where('int') == 2) == 1
assert db.count(where('int') == 1) == 2
def test_update_transform(db_smartcache):
db = db_smartcache
def increment(field):
def transform(el):
el[field] += 1
return transform
def delete(field):
def transform(el):
del el[field]
return transform
assert db.count(where('int') == 1) == 3
db.update(increment('int'), where('char') == 'a')
db.update(delete('char'), where('char') == 'a')
assert db.count(where('int') == 2) == 1
assert db.count(where('char') == 'a') == 0
assert db.count(where('int') == 1) == 2
def test_update_ids(db_smartcache):
db = db_smartcache
db.update({'int': 2}, doc_ids=[1, 2])
assert db.count(where('int') == 2) == 2
def test_search(db_smartcache):
db = db_smartcache
assert not db._query_cache
assert len(db.search(where('int') == 1)) == 3
assert len(db._query_cache) == 1
assert len(db.search(where('int') == 1)) == 3 # Query result from cache
def test_contians(db_smartcache):
db = db_smartcache
assert db.contains(where('int') == 1)
assert not db.contains(where('int') == 0)
def test_contains_id(db_smartcache):
db = db_smartcache
assert not db.contains(doc_id=88)
def test_get(db_smartcache):
db = db_smartcache
item = db.get(where('char') == 'b')
assert item['char'] == 'b'
def test_get_ids(db_smartcache):
db = db_smartcache
el = db.all()[0]
assert db.get(doc_id=el.doc_id) == el
assert db.get(doc_id=float('NaN')) is None
def test_count(db_smartcache):
db = db_smartcache
assert db.count(where('int') == 1) == 3
assert db.count(where('char') == 'd') == 0
def test_contains(db_smartcache):
db = db_smartcache
assert db.contains(where('int') == 1)
assert not db.contains(where('int') == 0)
def test_get_idempotent(db_smartcache):
db = db_smartcache
u = db.get(where('int') == 1)
z = db.get(where('int') == 1)
assert u == z
|
{
"content_hash": "fc21a032533276834e5560183c7c5158",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 76,
"avg_line_length": 21.68831168831169,
"alnum_prop": 0.5781437125748503,
"repo_name": "msiemens/tinydb-smartcache",
"id": "e1e00cbb9d365a40629db56aad45ea8a4385b89a",
"size": "6680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_smartcache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10473"
}
],
"symlink_target": ""
}
|
def not_a_method():
return super(None, None).not_a_method()
class Base(object):
def something(self):
pass
class NotUselessSuper(Base):
def multiple_statements(self):
first = 42 * 24
return super(NotUselessSuper, self).multiple_statements() + first
def not_a_call(self):
return 1 + 2
def not_super_call(self):
return type(self).__class__
def not_super_attribute_access(self):
return super(NotUselessSuper, self)
def invalid_super_call(self):
return super(NotUselessSuper, 1).invalid_super_call()
def other_invalid_super_call(self):
return super(2, 3, 4, 5).other_invalid_super_call()
def different_name(self):
return super(NotUselessSuper, self).something()
def different_super_mro_pointer(self):
return super(Base, self).different_super_mro_pointer()
def different_super_type(self):
return super(NotUselessSuper, NotUselessSuper).different_super_type()
def other_different_super_type(self):
return super(NotUselessSuper, 1).other_different_super_type()
def not_passing_param(self, first):
return super(NotUselessSuper, self).not_passing_param()
def modifying_param(self, first):
return super(NotUselessSuper, self).modifying_param(first + 1)
def transforming_param(self, first):
return super(NotUselessSuper, self).transforming_param(type(first))
def modifying_variadic(self, *args):
return super(NotUselessSuper, self).modifying_variadic(tuple(args))
def not_passing_keyword_variadics(self, *args, **kwargs):
return super(NotUselessSuper, self).not_passing_keyword_variadics(*args)
def not_passing_default(self, first, second=None):
return super(NotUselessSuper, self).not_passing_default(first)
def passing_only_a_handful(self, first, second, third, fourth):
return super(NotUselessSuper, self).passing_only_a_handful(
first, second)
def not_the_same_order(self, first, second, third):
return super(NotUselessSuper, self).not_the_same_order(third, first, second)
def no_kwargs_in_signature(self, key=None):
values = {'key': 'something'}
return super(NotUselessSuper, self).no_kwargs_in_signature(**values)
def no_args_in_signature(self, first, second):
values = (first + 1, second + 2)
return super(NotUselessSuper, self).no_args_in_signature(*values)
def variadics_with_multiple_keyword_arguments(self, **kwargs):
return super(NotUselessSuper, self).variadics_with_multiple_keyword_arguments(
first=None,
second=None,
**kwargs)
def extraneous_keyword_params(self, none_ok=False):
super(NotUselessSuper, self).extraneous_keyword_params(
none_ok,
valid_values=[23, 42])
def extraneous_positional_args(self, **args):
super(NotUselessSuper, self).extraneous_positional_args(
1, 2, **args)
class UselessSuper(Base):
def equivalent_params(self): # [useless-super-delegation]
return super(UselessSuper, self).equivalent_params()
def equivalent_params_1(self, first): # [useless-super-delegation]
return super(UselessSuper, self).equivalent_params_1(first)
def equivalent_params_2(self, *args): # [useless-super-delegation]
return super(UselessSuper, self).equivalent_params_2(*args)
def equivalent_params_3(self, *args, **kwargs): # [useless-super-delegation]
return super(UselessSuper, self).equivalent_params_3(*args, **kwargs)
def equivalent_params_4(self, first): # [useless-super-delegation]
super(UselessSuper, self).equivalent_params_4(first)
def equivalent_params_5(self, first, *args): # [useless-super-delegation]
super(UselessSuper, self).equivalent_params_5(first, *args)
def equivalent_params_6(self, first, *args, **kwargs): # [useless-super-delegation]
return super(UselessSuper, self).equivalent_params_6(first, *args, **kwargs)
def __init__(self): # [useless-super-delegation]
super(UselessSuper, self).__init__()
def trigger_something(value_to_trigger):
pass
class NotUselessSuperDecorators(Base):
@trigger_something('value1')
def method_decorated(self):
super(NotUselessSuperDecorators, self).method_decorated()
|
{
"content_hash": "8f3b08dfb29ee844aa4d2c7f6a641090",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 87,
"avg_line_length": 34.52755905511811,
"alnum_prop": 0.675484606613455,
"repo_name": "rvmoura96/projeto-almoxarifado",
"id": "b8aa71df35cea652d71460a28ccc0b5d5bc36780",
"size": "4557",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "myvenv/Lib/site-packages/pylint/test/functional/useless_super_delegation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1298"
},
{
"name": "C",
"bytes": "426659"
},
{
"name": "C++",
"bytes": "237226"
},
{
"name": "CSS",
"bytes": "47496"
},
{
"name": "DTrace",
"bytes": "863"
},
{
"name": "HTML",
"bytes": "106823"
},
{
"name": "JavaScript",
"bytes": "115482"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "11286094"
},
{
"name": "Shell",
"bytes": "182"
},
{
"name": "Tcl",
"bytes": "1295070"
}
],
"symlink_target": ""
}
|
"""Unit tests for low_rank.py."""
from itertools import product
import copy
import numpy as np
import pytest
from scipy.linalg import expm
from scipy.sparse.linalg import expm as sp_expm
import openfermion as of
from openfermion import givens_decomposition_square
from openfermion.testing.testing_utils import (
random_quadratic_hamiltonian,
random_unitary_matrix,
random_hermitian_matrix,
)
import fqe
from fqe.hamiltonians.restricted_hamiltonian import RestrictedHamiltonian
from fqe.algorithm.low_rank import (evolve_fqe_givens, evolve_fqe_givens_sector,
evolve_fqe_diagonal_coulomb,
double_factor_trotter_evolution)
def evolve_wf_givens(wfn: np.ndarray, u: np.ndarray) -> np.ndarray:
"""Utility for testing evolution of a full 2^{n} wavefunction.
Args:
wfn: 2^{n} x 1 vector.
u: (n//2 x n//2) unitary matrix.
Returns:
New evolved 2^{n} x 1 vector.
"""
rotations, diagonal = givens_decomposition_square(u.copy())
n_qubits = u.shape[0] * 2
# Iterate through each layer and time evolve by the appropriate
# fermion operators
for layer in rotations:
for givens in layer:
i, j, theta, phi = givens
op = of.FermionOperator(((2 * j, 1), (2 * j, 0)), coefficient=-phi)
op += of.FermionOperator(((2 * j + 1, 1), (2 * j + 1, 0)),
coefficient=-phi)
wfn = (sp_expm(-1j * of.get_sparse_operator(op, n_qubits=n_qubits))
@ wfn)
op = of.FermionOperator(
((2 * i, 1),
(2 * j, 0)), coefficient=-1j * theta) + of.FermionOperator(
((2 * j, 1), (2 * i, 0)), coefficient=1j * theta)
op += of.FermionOperator(
((2 * i + 1, 1),
(2 * j + 1, 0)), coefficient=-1j * theta) + of.FermionOperator(
((2 * j + 1, 1), (2 * i + 1, 0)), coefficient=1j * theta)
wfn = (sp_expm(-1j * of.get_sparse_operator(op, n_qubits=n_qubits))
@ wfn)
# evolve the last diagonal phases
for idx, final_phase in enumerate(diagonal):
if not np.isclose(final_phase, 1.0):
op = of.FermionOperator(((2 * idx, 1), (2 * idx, 0)),
-np.angle(final_phase))
op += of.FermionOperator(((2 * idx + 1, 1), (2 * idx + 1, 0)),
-np.angle(final_phase))
wfn = (sp_expm(-1j * of.get_sparse_operator(op, n_qubits=n_qubits))
@ wfn)
return wfn
def evolve_wf_diagonal_coulomb(wf: np.ndarray, vij_mat: np.ndarray,
time=1) -> np.ndarray:
r"""Utility for testing evolution of a full 2^{n} wavefunction via
:math:`\exp(-i \, time \sum_{i,j,\sigma,\tau} v_{i,j} n_{i\sigma} n_{j\tau})`.
Args:
wf: 2^{n} x 1 vector
vij_mat: List[(n//2 x n//2)] matrices
Returns:
New evolved 2^{n} x 1 vector
"""
norbs = int(np.log2(wf.shape[0]) / 2)
diagonal_coulomb = of.FermionOperator()
for i, j in product(range(norbs), repeat=2):
for sigma, tau in product(range(2), repeat=2):
diagonal_coulomb += of.FermionOperator(
(
(2 * i + sigma, 1),
(2 * i + sigma, 0),
(2 * j + tau, 1),
(2 * j + tau, 0),
),
coefficient=vij_mat[i, j],
)
bigU = sp_expm(-1j * time *
of.get_sparse_operator(diagonal_coulomb, n_qubits=2 * norbs))
return bigU @ wf
def double_factor_trotter_wf_evolution(initial_wfn: np.ndarray,
basis_change_unitaries, vij_mats,
deltat) -> np.ndarray:
r"""Doubled Factorized Trotter Evolution.
This is for testing the FQE evolution. Same input except the initial
wavefunction should be the full 2^{2 * norbs} space column vector.
Args:
initial_wfn: Initial wavefunction to evolve.
basis_change_unitaries: List of L + 1 unitaries. The first
unitary is U1 :math:`e^{-iTdt}` where T is the one-electron
component of the evolution. The remaining unitaries are
:math:`U_{i}U_{i-1}^{\dagger}.` All unitaries are expressed with
respect to the number of spatial basis functions.
vij_mats: list matrices of rho-rho interactions where
i, j indices of the matrix index the :math:`n_{i} n_{j}` integral.
Evolution is performed with respect to :math:`n_{i\sigma} n_{j\tau}`
where sigma and tau are up or down electron spins--a total of 4
Hamiltonian terms per i, j term.
deltat: evolution time prefactor for all v_ij Hamiltonians.
Returns:
The final wavefunction from a single Trotter evolution.
"""
intermediate_wfn = evolve_wf_givens(initial_wfn, basis_change_unitaries[0])
for step in range(1, len(basis_change_unitaries)):
intermediate_wfn = evolve_wf_diagonal_coulomb(intermediate_wfn,
vij_mats[step - 1],
deltat)
intermediate_wfn = evolve_wf_givens(intermediate_wfn,
basis_change_unitaries[step])
return intermediate_wfn
def test_fqe_givens():
"""Test Givens Rotation evolution for correctness."""
# set up
norbs = 4
n_elec = norbs
sz = 0
n_qubits = 2 * norbs
time = 0.126
fqe_wfn = fqe.Wavefunction([[n_elec, sz, norbs]])
fqe_wfn.set_wfn(strategy="random")
ikappa = random_quadratic_hamiltonian(
norbs,
conserves_particle_number=True,
real=False,
expand_spin=False,
seed=2,
)
fqe_ham = RestrictedHamiltonian((ikappa.n_body_tensors[1, 0],))
u = expm(-1j * ikappa.n_body_tensors[1, 0] * time)
# time-evolve
final_fqe_wfn = fqe_wfn.time_evolve(time, fqe_ham)
spin_ham = np.kron(ikappa.n_body_tensors[1, 0], np.eye(2))
assert of.is_hermitian(spin_ham)
ikappa_spin = of.InteractionOperator(
constant=0,
one_body_tensor=spin_ham,
two_body_tensor=np.zeros((n_qubits, n_qubits, n_qubits, n_qubits)),
)
bigU = expm(-1j * of.get_sparse_operator(ikappa_spin).toarray() * time)
initial_wf = fqe.to_cirq(fqe_wfn).reshape((-1, 1))
final_wf = bigU @ initial_wf
final_wfn_test = fqe.from_cirq(final_wf.flatten(), 1.0e-12)
assert np.allclose(final_fqe_wfn.rdm("i^ j"), final_wfn_test.rdm("i^ j"))
assert np.allclose(final_fqe_wfn.rdm("i^ j^ k l"),
final_wfn_test.rdm("i^ j^ k l"))
final_wfn_test2 = fqe.from_cirq(
evolve_wf_givens(initial_wf.copy(), u.copy()).flatten(), 1.0e-12)
givens_fqe_wfn = evolve_fqe_givens(fqe_wfn, u.copy())
assert np.allclose(givens_fqe_wfn.rdm("i^ j"), final_wfn_test2.rdm("i^ j"))
assert np.allclose(givens_fqe_wfn.rdm("i^ j^ k l"),
final_wfn_test2.rdm("i^ j^ k l"))
def test_fqe_givens_raises():
""" Make sure evolve_fqe_givens raises an exception on incorrect input.
"""
fqe_wfn = fqe.Wavefunction([[2, 2, 2]])
u = np.zeros((2, 2), dtype=np.complex128)
with pytest.raises(ValueError):
fqe_wfn = evolve_fqe_givens_sector(fqe_wfn, u, sector="test")
u = np.zeros((3, 3), dtype=np.complex128)
with pytest.raises(ValueError):
fqe_wfn = evolve_fqe_givens_sector(fqe_wfn, u)
def test_charge_charge_evolution():
norbs = 4
n_elec = norbs
sz = 0
time = 0.126
fqe_wfn = fqe.Wavefunction([[n_elec, sz, norbs]])
fqe_wfn.set_wfn(strategy="random")
initial_fqe_wfn = copy.deepcopy(fqe_wfn)
initial_wf = fqe.to_cirq(fqe_wfn).reshape((-1, 1))
# time-evolve
vij = np.random.random((norbs, norbs))
vij = vij + vij.T
final_fqe_wfn = evolve_fqe_diagonal_coulomb(initial_fqe_wfn, vij, time)
test_wfn = evolve_wf_diagonal_coulomb(wf=initial_wf, vij_mat=vij, time=time)
test_wfn = fqe.from_cirq(test_wfn.flatten(), 1.0e-12)
assert np.allclose(final_fqe_wfn.rdm("i^ j"), test_wfn.rdm("i^ j"))
assert np.allclose(final_fqe_wfn.rdm("i^ j^ k l"),
test_wfn.rdm("i^ j^ k l"))
def test_double_factorization_trotter():
norbs = 4
n_elec = norbs
sz = 0
time = 0.126
fqe_wfn = fqe.Wavefunction([[n_elec, sz, norbs]])
fqe_wfn.set_wfn(strategy="random")
fqe_wfn.print_wfn()
initial_wf = fqe.to_cirq(fqe_wfn).reshape((-1, 1))
basis_change_unitaries = []
for ii in range(2):
basis_change_unitaries.append(
random_unitary_matrix(4, real=False, seed=ii))
vij_mats = []
for ii in range(1):
vij_mats.append(random_hermitian_matrix(norbs, real=True, seed=ii))
with pytest.raises(ValueError):
_ = double_factor_trotter_evolution(fqe_wfn,
basis_change_unitaries + [0],
vij_mats=vij_mats,
deltat=0)
final_wfn = double_factor_trotter_evolution(
initial_wfn=fqe_wfn,
basis_change_unitaries=basis_change_unitaries,
vij_mats=vij_mats,
deltat=time,
)
intermediate_wfn = evolve_wf_givens(initial_wf.copy(),
basis_change_unitaries[0])
for step in range(1, len(basis_change_unitaries)):
intermediate_wfn = evolve_wf_diagonal_coulomb(intermediate_wfn,
vij_mats[step - 1], time)
intermediate_wfn = evolve_wf_givens(intermediate_wfn,
basis_change_unitaries[step])
test_final_wfn = fqe.from_cirq(intermediate_wfn.flatten(), 1.0e-12)
assert np.allclose(final_wfn.rdm("i^ j"), test_final_wfn.rdm("i^ j"))
assert np.allclose(final_wfn.rdm("i^ j^ k l"),
test_final_wfn.rdm("i^ j^ k l"))
test_final_wfn = double_factor_trotter_wf_evolution(initial_wf.copy(),
basis_change_unitaries,
vij_mats, time)
test_final_wfn = fqe.from_cirq(test_final_wfn.flatten(), 1.0e-12)
assert np.allclose(final_wfn.rdm("i^ j"), test_final_wfn.rdm("i^ j"))
assert np.allclose(final_wfn.rdm("i^ j^ k l"),
test_final_wfn.rdm("i^ j^ k l"))
|
{
"content_hash": "0e4f6ca2ab59c448ea86e5cacef7e781",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 80,
"avg_line_length": 39.3962962962963,
"alnum_prop": 0.5574880135376515,
"repo_name": "quantumlib/OpenFermion-FQE",
"id": "281453079af4a84bfa52bb1c2076507c2e1886de",
"size": "11230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/low_rank_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "219159"
},
{
"name": "Cython",
"bytes": "56133"
},
{
"name": "Makefile",
"bytes": "580"
},
{
"name": "Python",
"bytes": "1684470"
},
{
"name": "Shell",
"bytes": "5724"
}
],
"symlink_target": ""
}
|
import httplib
import tempita
def catalyst_pmid_request(first, middle, last, email, debug=False):
"""
Given an author name at the University of Florida, return the PMIDs of
papers that are likely to be the works of the author. The Harvard
Catalyst GETPMIDS service is called.
Uses an HTTP XML POST request (based on code by www.forceflow.be).
"""
request = tempita.Template("""
<?xml version="1.0"?>
<FindPMIDs>
<Name>
<First>{{first}}</First>
<Middle>{{middle}}</Middle>
<Last>{{last}}</Last>
<Suffix/>
</Name>
<EmailList>
<email>{{email}}</email>
</EmailList>
<AffiliationList>
<Affiliation>%university of florida%</Affiliation>
<Affiliation>%@ufl.edu%</Affiliation>
</AffiliationList>
<LocalDuplicateNames>1</LocalDuplicateNames>
<RequireFirstName>false</RequireFirstName>
<MatchThreshold>0.98</MatchThreshold>
</FindPMIDs>""")
HOST = "profiles.catalyst.harvard.edu"
API_URL = "/services/GETPMIDs/default.asp"
request = request.substitute(first=first, middle=middle, last=last, \
email=email)
webservice = httplib.HTTP(HOST)
webservice.putrequest("POST", API_URL)
webservice.putheader("Host", HOST)
webservice.putheader("User-Agent", "Python post")
webservice.putheader("Content-type", "text/xml; charset=\"UTF-8\"")
webservice.putheader("Content-length", "%d" % len(request))
webservice.endheaders()
webservice.send(request)
statuscode, statusmessage, header = webservice.getreply()
result = webservice.getfile().read()
if debug:
print "Request", request
print "StatusCode, Messgage,header", statuscode, statusmessage, header
print "result", result
return result
|
{
"content_hash": "d8a14e97f5618225108df9caebad351c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 78,
"avg_line_length": 40.06382978723404,
"alnum_prop": 0.6075411577270313,
"repo_name": "indera/vivo-pump",
"id": "afd75f2a832a5a155226efdf9bde780f7408bd12",
"size": "1883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uf_examples/publications/catalyst_request.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1398"
},
{
"name": "Python",
"bytes": "266337"
},
{
"name": "TeX",
"bytes": "945852"
}
],
"symlink_target": ""
}
|
"""
Allows access to the bot account's watchlist.
The watchlist can be updated manually by running this script.
Syntax:
python pwb.py watchlist [-all | -new]
Command line options:
-all - Reloads watchlists for all wikis where a watchlist is already
present
-new  - Load watchlists for all wikis where accounts are set in
user-config.py
"""
#
# (C) Daniel Herding, 2005
# (C) Pywikibot team, 2005-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import os
import pywikibot
from pywikibot import config
from pywikibot.data.api import CachedRequest
from scripts.maintenance.cache import CacheEntry
def get(site=None):
"""Load the watchlist, fetching it if necessary."""
if site is None:
site = pywikibot.Site()
watchlist = [p.title() for p in site.watched_pages()]
return watchlist
def isWatched(pageName, site=None):
"""Check whether a page is being watched."""
watchlist = get(site)
return pageName in watchlist
def refresh(site, sysop=False):
"""Fetch the watchlist."""
pywikibot.output('Retrieving watchlist for {0}.'.format(str(site)))
return list(site.watched_pages(sysop=sysop, force=True))
def refresh_all(sysop=False):
"""Reload watchlists for all wikis where a watchlist is already present."""
cache_path = CachedRequest._get_cache_dir()
files = os.listdir(cache_path)
seen = []
for filename in files:
entry = CacheEntry(cache_path, filename)
entry._load_cache()
entry.parse_key()
entry._rebuild()
if entry.site not in seen:
if entry._data.get('watchlistraw'):
refresh(entry.site, sysop)
seen.append(entry.site)
def refresh_new(sysop=False):
"""Load watchlists of all wikis for accounts set in user-config.py."""
pywikibot.output(
'Downloading all watchlists for your accounts in user-config.py')
for family in config.usernames:
for lang in config.usernames[family]:
site = pywikibot.Site(lang, family)
refresh(site, sysop=sysop)
for family in config.sysopnames:
for lang in config.sysopnames[family]:
site = pywikibot.Site(lang, family)
refresh(site, sysop=sysop)
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
all = False
new = False
sysop = False
for arg in pywikibot.handle_args(args):
if arg in ('-all', '-update'):
all = True
elif arg == '-new':
new = True
elif arg == '-sysop':
sysop = True
if all:
refresh_all(sysop=sysop)
elif new:
refresh_new(sysop=sysop)
else:
site = pywikibot.Site()
watchlist = refresh(site, sysop=sysop)
pywikibot.output(u'%i pages in the watchlist.' % len(watchlist))
for page in watchlist:
try:
pywikibot.stdout(page.title())
except pywikibot.InvalidTitle:
pywikibot.exception()
if __name__ == "__main__":
main()
|
{
"content_hash": "d636d33ce4c75aab9adf3afc785fdf25",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 27.35,
"alnum_prop": 0.6279707495429616,
"repo_name": "magul/pywikibot-core",
"id": "cde0c0377a73e85e60e6b61d14e5df3c5f150a4c",
"size": "3324",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/watchlist.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "HTML",
"bytes": "1365"
},
{
"name": "Python",
"bytes": "4538707"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import getpass
from optparse import make_option
from django.core.management.base import BaseCommand
from django.utils.six.moves import input
from django.utils.translation import ugettext_lazy as _
from reviewboard.hostingsvcs.errors import (AuthorizationError,
TwoFactorAuthCodeRequiredError)
from reviewboard.hostingsvcs.models import HostingServiceAccount
class Command(BaseCommand):
help = _('Resets associated GitHub tokens')
option_list = BaseCommand.option_list + (
make_option('--yes',
action='store_true',
default=False,
dest='force_yes',
help=_('Answer yes to all questions')),
make_option('--local-sites',
action='store',
dest='local_sites',
help=_('Comma-separated list of Local Sites to '
'filter by')),
)
def handle(self, *usernames, **options):
force_yes = options['force_yes']
local_sites = options['local_sites']
accounts = HostingServiceAccount.objects.filter(service_name='github')
if usernames:
accounts = accounts.filter(username__in=usernames)
if local_sites:
local_site_names = local_sites.split(',')
if local_site_names:
accounts = accounts.filter(
local_site__name__in=local_site_names)
for account in accounts:
if force_yes:
reset = 'y'
else:
if account.local_site:
reset_msg = _('Reset token for %(site_name)s '
'(%(username)s) [Y/n] ') % {
'site_name': account.local_site.name,
'username': account.username,
}
else:
reset_msg = _('Reset token for %s [Y/n] ') % (
account.username)
reset = input(reset_msg)
if reset != 'n':
self._reset_token(account)
def _reset_token(self, account):
service = account.service
password = None
auth_token = None
while True:
if (not password and
service.get_reset_auth_token_requires_password()):
password = getpass.getpass(_('Password for %s: ')
% account.username)
auth_token = None
try:
service.reset_auth_token(password, auth_token)
self.stdout.write(_('Successfully reset token for %s\n')
% account.username)
break
except TwoFactorAuthCodeRequiredError:
auth_token = input('Enter your two-factor auth token: ')
except AuthorizationError as e:
self.stderr.write('%s\n' % e)
password = None
except Exception as e:
self.stderr.write(_('Unexpected error: %s\n') % e)
raise
break
|
{
"content_hash": "ced05b0b4f07620d16a141a52089720b",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 78,
"avg_line_length": 34.83695652173913,
"alnum_prop": 0.5082683307332293,
"repo_name": "brennie/reviewboard",
"id": "5f12d70139cf3a97c45732155bf69dc8f2726715",
"size": "3205",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "reviewboard/hostingsvcs/management/commands/reset-github-tokens.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "247208"
},
{
"name": "HTML",
"bytes": "204351"
},
{
"name": "JavaScript",
"bytes": "2557855"
},
{
"name": "Python",
"bytes": "5241630"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
}
|
import asyncio
import threading
from . import *
from ...util.Configuration import Configuration
from ...util.funcutils import is_none
"""
Protocol handler for the secure connection with the server
"""
class ProtocolHandler(asyncio.Protocol):
"""
The secure protocol handler used to communicate with the server.
The first state is S0. Some state objects are responsible of choosing
the next state.
Attribute(s):
- core: the ClientCore instance
- loop: the asynchronous input/output loop
- states: sequence of state objects
- state: the actual state (set in connection_made and changed by states)
- lock: lock object for serializing thread execution
- config: the client configuration as a list of curve and cipher names
- transport: the SSL socket
- password: the client password (set by the UI)
- login: the client login (set by the UI)
- ephecc: the ephemeral server public key (set by S0 state)
- ms: the client master secret (set by S1S state)
- session: the client session number (set by S1CR state)
- keyH: the client key handler (set by S31A state)
Method(s):
- connection_made: method called when the connection with the server is made
- data_received: method called each time a new data is received
- connection_lost: method called when the connection is lost or closed
- exception_handler: method called when an exception is raised by a state
- notify: notify ClientCore a property has changed
"""
def __init__(self, core):
"""Object initialization"""
self.core = core
self.loop = core.loop
self.password = self.login = 'None'
# The protocol states
self.states = {'0': StateS0(),
'1S': StateS1S(), '1CR': StateS1CR(), '1CA': StateS1CA(),
'21R': StateS21R(), '21A': StateS21A(),
'22R': StateS22R(), '22A': StateS22A(),
'31R': StateS31R(), '31A': StateS31A(),
'32R': StateS32R(), '32A': StateS32A(),
'33R': StateS33R(), '33A': StateS33A(),
'34R': StateS34R(), '34A': StateS34A(),
'35R': StateS35R(), '35A': StateS35A(),
'36R': StateS36R(), '36A': StateS36A(),
'37R': StateS37R(), '37A': StateS37A()}
# The client configuration
self.config = is_none(Configuration.curve1) + ";" + \
is_none(Configuration.cipher1) + ";" + \
is_none(Configuration.curve2) + ";" + \
is_none(Configuration.cipher2) + ";" + \
is_none(Configuration.curve3) + ";" + is_none(Configuration.cipher3)
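# The resulting string is six ';'-separated fields in the order
# curve1;cipher1;curve2;cipher2;curve3;cipher3 (is_none presumably normalizes
# unset Configuration entries).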
# Lock object for serializing thread execution
self.lock = threading.Lock()
def connection_made(self, transport):
"""See mother class"""
self.transport = transport
self.peername = transport.get_extra_info('peername')
self.state = self.states['0'] # State 0 at the beginning
def data_received(self, data):
"""See mother class"""
# Wait for actual execution before scheduling a new execution
with self.lock:
self.loop.run_in_executor(None, self.state.do, self,
data) # Future execution
def connection_lost(self, exc):
"""See mother class"""
if exc:
self.loop.run_in_executor(None, self.notify,
'connection.state.error',
str(exc).capitalize())
else:
self.loop.run_in_executor(None, self.notify,
'connection.state.error',
'Connection closed')
asyncio.run_coroutine_threadsafe(self.core.close(), self.loop)
def exception_handler(self, exc):
"""Exception handler for actions executed by the executor"""
self.loop.run_in_executor(None, self.notify, 'connection.state.error',
str(exc).capitalize()[:50] + '...')
asyncio.run_coroutine_threadsafe(self.core.close(), self.loop)
def notify(self, key, value):
"""Notify ClientCore a property has changed"""
self.core.update(key, value)
|
{
"content_hash": "92068b7647fa4c9fc525f41c32430ead",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 80,
"avg_line_length": 42.94059405940594,
"alnum_prop": 0.5826608254553839,
"repo_name": "thethythy/Mnemopwd",
"id": "0eaad51462ebfbd12c0aea2117da3cc72c178cd9",
"size": "5734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mnemopwd/client/corelayer/protocol/ProtocolHandler.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "580678"
}
],
"symlink_target": ""
}
|
class Operation(object):
def __eq__(self, other):
return self.__class__ == other.__class__ and self.__dict__ == other.__dict__
class Update(Operation):
def __init__(self, from_package, to_package, reason=None):
self.from_package = from_package
self.to_package = to_package
self.reason = reason
def __repr__(self):
return "Updating %s (%s) to %s (%s)" % (
self.from_package.name, self.from_package.version,
self.to_package.name, self.to_package.version)
class Install(Operation):
def __init__(self, package, reason=None):
self.package = package
self.reason = reason
def __repr__(self):
return "Installing %s (%s)" % (self.package.name, self.package.version)
class Uninstall(Operation):
def __init__(self, package, reason=None):
self.package = package
self.reason = reason
def __repr__(self):
return "Uninstalling %s (%s)" % (self.package.name, self.package.version)
|
{
"content_hash": "2decd19d267b21aaee1355cf857fb0ba",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 84,
"avg_line_length": 33.9,
"alnum_prop": 0.5948869223205506,
"repo_name": "enthought/depsolver",
"id": "64321bad9f8854ec64f2a02e8495e8f40209946e",
"size": "1017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "depsolver/solver/operations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5585"
},
{
"name": "Python",
"bytes": "307961"
},
{
"name": "Shell",
"bytes": "5111"
}
],
"symlink_target": ""
}
|
from unittest import mock
from neutron_lib import context
from neutron_lib.plugins import utils as plugin_utils
from oslo_config import cfg
from oslo_db import exception as exc
from sqlalchemy.orm import query
from neutron.plugins.ml2.drivers import type_vlan
from neutron.tests.unit import testlib_api
TENANT_NET = 'phys_net2'
VLAN_MIN = 200
VLAN_MAX = 209
VLAN_OUTSIDE = 100
NETWORK_VLAN_RANGES = {
TENANT_NET: [(VLAN_MIN, VLAN_MAX)],
}
NETWORK_VLAN_RANGES_CFG_ENTRIES = ["%s:%s:%s" %
(TENANT_NET, VLAN_MIN, VLAN_MAX)]
SERVICE_PLUGIN_KLASS = ('neutron.services.network_segment_range.plugin.'
'NetworkSegmentRangePlugin')
class HelpersTest(testlib_api.SqlTestCase):
def setUp(self):
super(HelpersTest, self).setUp()
self.driver = type_vlan.VlanTypeDriver()
self.driver.network_vlan_ranges = NETWORK_VLAN_RANGES
self.driver._sync_vlan_allocations()
self.context = context.get_admin_context()
def check_raw_segment(self, expected, observed):
for key, value in expected.items():
self.assertEqual(value, observed[key])
def test_primary_keys(self):
self.assertEqual(set(['physical_network', 'vlan_id']),
self.driver.primary_keys)
def test_allocate_specific_unallocated_segment_in_pools(self):
expected = dict(physical_network=TENANT_NET, vlan_id=VLAN_MIN)
observed = self.driver.allocate_fully_specified_segment(self.context,
**expected)
self.check_raw_segment(expected, observed)
def test_allocate_specific_allocated_segment_in_pools(self):
raw_segment = dict(physical_network=TENANT_NET, vlan_id=VLAN_MIN)
self.driver.allocate_fully_specified_segment(self.context,
**raw_segment)
observed = self.driver.allocate_fully_specified_segment(self.context,
**raw_segment)
self.assertIsNone(observed)
def test_allocate_specific_finally_allocated_segment_in_pools(self):
# Test case: allocate a specific unallocated segment in pools but
# the segment is allocated concurrently between select and update
raw_segment = dict(physical_network=TENANT_NET, vlan_id=VLAN_MIN)
with mock.patch.object(query.Query, 'update', return_value=0):
observed = self.driver.allocate_fully_specified_segment(
self.context, **raw_segment)
self.assertIsNone(observed)
def test_allocate_specific_unallocated_segment_outside_pools(self):
expected = dict(physical_network=TENANT_NET, vlan_id=VLAN_OUTSIDE)
observed = self.driver.allocate_fully_specified_segment(self.context,
**expected)
self.check_raw_segment(expected, observed)
def test_allocate_specific_allocated_segment_outside_pools(self):
raw_segment = dict(physical_network=TENANT_NET, vlan_id=VLAN_OUTSIDE)
self.driver.allocate_fully_specified_segment(self.context,
**raw_segment)
observed = self.driver.allocate_fully_specified_segment(self.context,
**raw_segment)
self.assertIsNone(observed)
def test_allocate_specific_finally_unallocated_segment_outside_pools(self):
# Test case: allocate a specific allocated segment in pools but
# the segment is concurrently unallocated after select or update
expected = dict(physical_network=TENANT_NET, vlan_id=VLAN_MIN)
with mock.patch.object(self.driver.model, 'save'):
observed = self.driver.allocate_fully_specified_segment(
self.context, **expected)
self.check_raw_segment(expected, observed)
def test_allocate_partial_segment_without_filters(self):
expected = dict(physical_network=TENANT_NET)
observed = self.driver.allocate_partially_specified_segment(
self.context)
self.check_raw_segment(expected, observed)
def test_allocate_partial_segment_with_filter(self):
expected = dict(physical_network=TENANT_NET)
observed = self.driver.allocate_partially_specified_segment(
self.context, **expected)
self.check_raw_segment(expected, observed)
def test_allocate_partial_segment_no_resource_available(self):
for i in range(VLAN_MIN, VLAN_MAX + 1):
self.driver.allocate_partially_specified_segment(self.context)
observed = self.driver.allocate_partially_specified_segment(
self.context)
self.assertIsNone(observed)
def test_allocate_partial_segment_outside_pools(self):
raw_segment = dict(physical_network='other_phys_net')
observed = self.driver.allocate_partially_specified_segment(
self.context, **raw_segment)
self.assertIsNone(observed)
def test_allocate_partial_segment_first_attempt_fails(self):
expected = dict(physical_network=TENANT_NET)
with mock.patch.object(query.Query, 'update', side_effect=[0, 1]):
self.assertRaises(
exc.RetryRequest,
self.driver.allocate_partially_specified_segment,
self.context, **expected)
observed = self.driver.allocate_partially_specified_segment(
self.context, **expected)
self.check_raw_segment(expected, observed)
class HelpersTestWithNetworkSegmentRange(HelpersTest):
def setUp(self):
super(HelpersTestWithNetworkSegmentRange, self).setUp()
cfg.CONF.set_override('network_vlan_ranges',
NETWORK_VLAN_RANGES_CFG_ENTRIES,
group='ml2_type_vlan')
cfg.CONF.set_override('service_plugins', [SERVICE_PLUGIN_KLASS])
self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
NETWORK_VLAN_RANGES_CFG_ENTRIES)
self.context = context.get_admin_context()
self.driver = type_vlan.VlanTypeDriver()
self.driver.initialize_network_segment_range_support()
self.driver._sync_vlan_allocations()
|
{
"content_hash": "0fbc1cba221a20c3b51ea070fb1aa4ca",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 79,
"avg_line_length": 45.1063829787234,
"alnum_prop": 0.6388364779874214,
"repo_name": "openstack/neutron",
"id": "8714e96b477fea3b3b530fc888d6a289702540b2",
"size": "6999",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/plugins/ml2/drivers/test_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15932611"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
}
|
import psycopg2 # noqa: F401
import sqlalchemy.dialects.postgresql as custom_types
# GeoAlchemy adds support for PostGIS extensions in SQLAlchemy. In order to
# activate it, we must import it so that it can hook into SQLAlchemy. While
# we don't use the Geometry type that we import, we do care about the side
# effects of the import. For more details, see here:
# https://geoalchemy-2.readthedocs.io/en/latest/core_tutorial.html#reflecting-tables.
from geoalchemy2 import Geometry # noqa: F401
from datahub.ingestion.source.sql.sql_common import (
BasicSQLAlchemyConfig,
SQLAlchemySource,
register_custom_type,
)
from datahub.metadata.com.linkedin.pegasus2avro.schema import (
ArrayTypeClass,
BytesTypeClass,
MapTypeClass,
)
register_custom_type(custom_types.ARRAY, ArrayTypeClass)
register_custom_type(custom_types.JSON, BytesTypeClass)
register_custom_type(custom_types.JSONB, BytesTypeClass)
register_custom_type(custom_types.HSTORE, MapTypeClass)
class PostgresConfig(BasicSQLAlchemyConfig):
# defaults
scheme = "postgresql+psycopg2"
def get_identifier(self: BasicSQLAlchemyConfig, schema: str, table: str) -> str:
regular = f"{schema}.{table}"
if self.database_alias:
return f"{self.database_alias}.{regular}"
if self.database:
return f"{self.database}.{regular}"
return regular
class PostgresSource(SQLAlchemySource):
def __init__(self, config, ctx):
super().__init__(config, ctx, "postgres")
@classmethod
def create(cls, config_dict, ctx):
config = PostgresConfig.parse_obj(config_dict)
return cls(config, ctx)
|
{
"content_hash": "6b09daec5eaf1e201386128ac2a2fd68",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 85,
"avg_line_length": 34.541666666666664,
"alnum_prop": 0.7243667068757539,
"repo_name": "linkedin/WhereHows",
"id": "dedbb95a859e48524755d723a17a2ed70cb4912d",
"size": "1718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metadata-ingestion/src/datahub/ingestion/source/sql/postgres.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "110129"
},
{
"name": "Dockerfile",
"bytes": "2521"
},
{
"name": "HTML",
"bytes": "131513"
},
{
"name": "Java",
"bytes": "1307442"
},
{
"name": "JavaScript",
"bytes": "148450"
},
{
"name": "Nearley",
"bytes": "2837"
},
{
"name": "Python",
"bytes": "1419332"
},
{
"name": "Shell",
"bytes": "2564"
},
{
"name": "TSQL",
"bytes": "42644"
},
{
"name": "TypeScript",
"bytes": "641014"
}
],
"symlink_target": ""
}
|
"""
test_sanity_bundle_augmentation.py
Unittest for bundle augmentation.
"""
from __future__ import absolute_import
import sys
import unittest
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.augmentation import ietf_aug_base_1
from ydk.models.augmentation import ietf_aug_base_2
from test_utils import assert_with_error
from test_utils import ParametrizedTestCase
from test_utils import get_device_info
class SanityYang(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ncc = NetconfServiceProvider(
cls.hostname,
cls.username,
cls.password,
cls.port,
cls.protocol,
cls.on_demand,
cls.common_cache,
cls.timeout)
cls.crud = CRUDService()
def setUp(self):
self.crud.delete(self.ncc, ietf_aug_base_1.Cpython())
self.crud.delete(self.ncc, ietf_aug_base_2.Cpython())
def tearDown(self):
self.crud.delete(self.ncc, ietf_aug_base_1.Cpython())
self.crud.delete(self.ncc, ietf_aug_base_2.Cpython())
def test_aug_base_1(self):
cpython = ietf_aug_base_1.Cpython()
cpython.doc.ydktest_aug_1.aug_one = 'aug one'
cpython.doc.ydktest_aug_2.aug_two = 'aug two'
cpython.doc.ydktest_aug_4.aug_four = 'aug four'
cpython.lib.ydktest_aug_1.ydktest_aug_nested_1.aug_one = 'aug one'
cpython.lib.ydktest_aug_2.ydktest_aug_nested_2.aug_two = 'aug two'
cpython.lib.ydktest_aug_4.ydktest_aug_nested_4.aug_four = 'aug four'
cpython.doc.disutils.four_aug_list.enabled = True
item1 = cpython.doc.disutils.four_aug_list.Ldata()
item2 = cpython.doc.disutils.four_aug_list.Ldata()
item1.name, item1.number = 'one', 1
item2.name, item2.number = 'two', 2
self.crud.create(self.ncc, cpython)
cpython_read = self.crud.read(self.ncc, ietf_aug_base_1.Cpython())
self.assertEqual(cpython, cpython_read)
def test_aug_base_2(self):
cpython = ietf_aug_base_2.Cpython()
cpython.tools.aug_four = 'aug four'
self.crud.create(self.ncc, cpython)
cpython_read = self.crud.read(self.ncc, ietf_aug_base_2.Cpython())
self.assertEqual(cpython, cpython_read)
if __name__ == '__main__':
device, non_demand, common_cache, timeout = get_device_info()
suite = unittest.TestSuite()
suite.addTest(ParametrizedTestCase.parametrize(
SanityYang,
device=device,
non_demand=non_demand,
common_cache=common_cache,
timeout=timeout))
ret = not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()
sys.exit(ret)
|
{
"content_hash": "578b2b81f97c941383565ba9660f7f9c",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 77,
"avg_line_length": 32.1764705882353,
"alnum_prop": 0.6497257769652651,
"repo_name": "CiscoDevNet/ydk-gen",
"id": "5ec76f7b2d8bd24f38f18fcf89397e41c3adc670",
"size": "3448",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sdk/python/core/tests/test_sanity_augmentation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "21945"
},
{
"name": "C",
"bytes": "15875"
},
{
"name": "C++",
"bytes": "3529963"
},
{
"name": "CMake",
"bytes": "120070"
},
{
"name": "CSS",
"bytes": "134"
},
{
"name": "Dockerfile",
"bytes": "770"
},
{
"name": "Go",
"bytes": "566728"
},
{
"name": "Makefile",
"bytes": "960022"
},
{
"name": "Python",
"bytes": "1052712"
},
{
"name": "Ruby",
"bytes": "4023"
},
{
"name": "Shell",
"bytes": "153786"
}
],
"symlink_target": ""
}
|
categories = ["save_and_restore_state",
"save_and_restore_control_and_status"]
microcode = '''
# 128 bit multimedia and scientific instructions
'''
for category in categories:
exec("from . import %s as cat" % category)
microcode += cat.microcode
|
{
"content_hash": "2cebd385dfa1481304cffe7a9df22407",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 52,
"avg_line_length": 29.88888888888889,
"alnum_prop": 0.6728624535315985,
"repo_name": "gem5/gem5",
"id": "53d48190701f64a35978c9cecbd45cdcd7cd48b2",
"size": "2367",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "src/arch/x86/isa/insts/simd128/integer/save_and_restore_state/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "145626"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "C",
"bytes": "3927153"
},
{
"name": "C++",
"bytes": "42960484"
},
{
"name": "CMake",
"bytes": "133888"
},
{
"name": "Dockerfile",
"bytes": "34102"
},
{
"name": "Emacs Lisp",
"bytes": "1914"
},
{
"name": "Forth",
"bytes": "354"
},
{
"name": "Fortran",
"bytes": "15436"
},
{
"name": "HTML",
"bytes": "146414"
},
{
"name": "Hack",
"bytes": "139769"
},
{
"name": "Java",
"bytes": "6966"
},
{
"name": "M4",
"bytes": "42624"
},
{
"name": "Makefile",
"bytes": "39573"
},
{
"name": "Perl",
"bytes": "23784"
},
{
"name": "Python",
"bytes": "8079781"
},
{
"name": "Roff",
"bytes": "8754"
},
{
"name": "SCSS",
"bytes": "2971"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "5328"
},
{
"name": "Shell",
"bytes": "95638"
},
{
"name": "Starlark",
"bytes": "25668"
},
{
"name": "SuperCollider",
"bytes": "8869"
},
{
"name": "Vim Script",
"bytes": "4343"
},
{
"name": "sed",
"bytes": "3897"
}
],
"symlink_target": ""
}
|
import os
import sys
def is_active():
return True
def get_name():
return "iOS"
def can_build():
import sys
import os
if sys.platform == 'darwin' or os.environ.has_key("OSXCROSS_IOS"):
return True
return False
def get_opts():
return [
('IPHONEPLATFORM', 'name of the iphone platform', 'iPhoneOS'),
('IPHONEPATH', 'the path to iphone toolchain', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain'),
('IPHONESDK', 'path to the iphone SDK', '/Applications/Xcode.app/Contents/Developer/Platforms/${IPHONEPLATFORM}.platform/Developer/SDKs/${IPHONEPLATFORM}.sdk/'),
('game_center', 'Support for game center', 'yes'),
('store_kit', 'Support for in-app store', 'yes'),
('icloud', 'Support for iCloud', 'yes'),
('ios_gles22_override', 'Force GLES2.0 on iOS', 'yes'),
('ios_exceptions', 'Enable exceptions', 'no'),
('ios_triple', 'Triple for ios toolchain', ''),
('ios_sim', 'Build simulator binary', 'no'),
]
def get_flags():
return [
('tools', 'no'),
]
def configure(env):
env.Append(CPPPATH=['#platform/iphone'])
env['ENV']['PATH'] = env['IPHONEPATH'] + "/Developer/usr/bin/:" + env['ENV']['PATH']
env['CC'] = '$IPHONEPATH/usr/bin/${ios_triple}clang'
env['CXX'] = '$IPHONEPATH/usr/bin/${ios_triple}clang++'
env['AR'] = '$IPHONEPATH/usr/bin/${ios_triple}ar'
env['RANLIB'] = '$IPHONEPATH/usr/bin/${ios_triple}ranlib'
import string
if (env["ios_sim"] == "yes" or env["arch"] == "x86"): # i386, simulator
env["arch"] = "x86"
env["bits"] = "32"
env.Append(CCFLAGS=string.split('-arch i386 -fobjc-abi-version=2 -fobjc-legacy-dispatch -fmessage-length=0 -fpascal-strings -fblocks -fasm-blocks -D__IPHONE_OS_VERSION_MIN_REQUIRED=40100 -isysroot $IPHONESDK -mios-simulator-version-min=4.3 -DCUSTOM_MATRIX_TRANSFORM_H=\\\"build/iphone/matrix4_iphone.h\\\" -DCUSTOM_VECTOR3_TRANSFORM_H=\\\"build/iphone/vector3_iphone.h\\\"'))
elif (env["arch"] == "arm" or env["arch"] == "arm32" or env["arch"] == "armv7" or env["bits"] == "32"): # arm
env["arch"] = "arm"
env["bits"] = "32"
env.Append(CCFLAGS=string.split('-fno-objc-arc -arch armv7 -fmessage-length=0 -fno-strict-aliasing -fdiagnostics-print-source-range-info -fdiagnostics-show-category=id -fdiagnostics-parseable-fixits -fpascal-strings -fblocks -isysroot $IPHONESDK -fvisibility=hidden -mthumb "-DIBOutlet=__attribute__((iboutlet))" "-DIBOutletCollection(ClassName)=__attribute__((iboutletcollection(ClassName)))" "-DIBAction=void)__attribute__((ibaction)" -miphoneos-version-min=9.0 -MMD -MT dependencies'))
else: # arm64
env["arch"] = "arm64"
env["bits"] = "64"
env.Append(CCFLAGS=string.split('-fno-objc-arc -arch arm64 -fmessage-length=0 -fno-strict-aliasing -fdiagnostics-print-source-range-info -fdiagnostics-show-category=id -fdiagnostics-parseable-fixits -fpascal-strings -fblocks -fvisibility=hidden -MMD -MT dependencies -miphoneos-version-min=9.0 -isysroot $IPHONESDK'))
env.Append(CPPFLAGS=['-DNEED_LONG_INT'])
env.Append(CPPFLAGS=['-DLIBYUV_DISABLE_NEON'])
if (env["arch"] == "x86"):
env['IPHONEPLATFORM'] = 'iPhoneSimulator'
env.Append(LINKFLAGS=['-arch', 'i386', '-mios-simulator-version-min=4.3',
'-isysroot', '$IPHONESDK',
#'-mmacosx-version-min=10.6',
'-Xlinker',
'-objc_abi_version',
'-Xlinker', '2',
'-framework', 'AudioToolbox',
'-framework', 'AVFoundation',
'-framework', 'CoreAudio',
'-framework', 'CoreGraphics',
'-framework', 'CoreMedia',
'-framework', 'CoreMotion',
'-framework', 'Foundation',
'-framework', 'Security',
'-framework', 'UIKit',
'-framework', 'MediaPlayer',
'-framework', 'OpenGLES',
'-framework', 'QuartzCore',
'-framework', 'SystemConfiguration',
'-framework', 'GameController',
'-F$IPHONESDK',
])
elif (env["arch"] == "arm64"):
env.Append(LINKFLAGS=['-arch', 'arm64', '-Wl,-dead_strip', '-miphoneos-version-min=9.0',
'-isysroot', '$IPHONESDK',
#'-stdlib=libc++',
'-framework', 'Foundation',
'-framework', 'UIKit',
'-framework', 'CoreGraphics',
'-framework', 'OpenGLES',
'-framework', 'QuartzCore',
'-framework', 'CoreAudio',
'-framework', 'AudioToolbox',
'-framework', 'SystemConfiguration',
'-framework', 'Security',
#'-framework', 'AdSupport',
'-framework', 'MediaPlayer',
'-framework', 'AVFoundation',
'-framework', 'CoreMedia',
'-framework', 'CoreMotion',
'-framework', 'GameController',
])
else:
env.Append(LINKFLAGS=['-arch', 'armv7', '-Wl,-dead_strip', '-miphoneos-version-min=9.0',
'-isysroot', '$IPHONESDK',
'-framework', 'Foundation',
'-framework', 'UIKit',
'-framework', 'CoreGraphics',
'-framework', 'OpenGLES',
'-framework', 'QuartzCore',
'-framework', 'CoreAudio',
'-framework', 'AudioToolbox',
'-framework', 'SystemConfiguration',
'-framework', 'Security',
#'-framework', 'AdSupport',
'-framework', 'MediaPlayer',
'-framework', 'AVFoundation',
'-framework', 'CoreMedia',
'-framework', 'CoreMotion',
'-framework', 'GameController',
])
if env['game_center'] == 'yes':
env.Append(CPPFLAGS=['-DGAME_CENTER_ENABLED'])
env.Append(LINKFLAGS=['-framework', 'GameKit'])
if env['store_kit'] == 'yes':
env.Append(CPPFLAGS=['-DSTOREKIT_ENABLED'])
env.Append(LINKFLAGS=['-framework', 'StoreKit'])
if env['icloud'] == 'yes':
env.Append(CPPFLAGS=['-DICLOUD_ENABLED'])
env.Append(CPPPATH=['$IPHONESDK/usr/include', '$IPHONESDK/System/Library/Frameworks/OpenGLES.framework/Headers', '$IPHONESDK/System/Library/Frameworks/AudioUnit.framework/Headers'])
if (env["target"].startswith("release")):
env.Append(CPPFLAGS=['-DNDEBUG', '-DNS_BLOCK_ASSERTIONS=1'])
env.Append(CPPFLAGS=['-O2', '-flto', '-ftree-vectorize', '-fomit-frame-pointer', '-ffast-math', '-funsafe-math-optimizations'])
env.Append(LINKFLAGS=['-O2', '-flto'])
if env["target"] == "release_debug":
env.Append(CPPFLAGS=['-DDEBUG_ENABLED'])
elif (env["target"] == "debug"):
env.Append(CPPFLAGS=['-D_DEBUG', '-DDEBUG=1', '-gdwarf-2', '-O0', '-DDEBUG_ENABLED'])
env.Append(CPPFLAGS=['-DDEBUG_MEMORY_ENABLED'])
if (env["ios_sim"] == "yes"): # TODO: Check if needed?
env['ENV']['MACOSX_DEPLOYMENT_TARGET'] = '10.6'
env['ENV']['CODESIGN_ALLOCATE'] = '/Developer/Platforms/iPhoneOS.platform/Developer/usr/bin/codesign_allocate'
env.Append(CPPFLAGS=['-DIPHONE_ENABLED', '-DUNIX_ENABLED', '-DGLES2_ENABLED', '-DMPC_FIXED_POINT'])
# TODO: Move that to opus module's config
if("module_opus_enabled" in env and env["module_opus_enabled"] != "no"):
env.opus_fixed_point = "yes"
if env["arch"] == "x86":
pass
elif(env["arch"] == "arm64"):
env.Append(CFLAGS=["-DOPUS_ARM64_OPT"])
else:
env.Append(CFLAGS=["-DOPUS_ARM_OPT"])
if env['ios_exceptions'] == 'yes':
env.Append(CPPFLAGS=['-fexceptions'])
else:
env.Append(CPPFLAGS=['-fno-exceptions'])
# env['neon_enabled']=True
env['S_compiler'] = '$IPHONEPATH/Developer/usr/bin/gcc'
import methods
env.Append(BUILDERS={'GLSL120': env.Builder(action=methods.build_legacygl_headers, suffix='glsl.h', src_suffix='.glsl')})
env.Append(BUILDERS={'GLSL': env.Builder(action=methods.build_glsl_headers, suffix='glsl.h', src_suffix='.glsl')})
env.Append(BUILDERS={'GLSL120GLES': env.Builder(action=methods.build_gles2_headers, suffix='glsl.h', src_suffix='.glsl')})
|
{
"content_hash": "fd3bba5550124361ef5b6df70698e350",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 496,
"avg_line_length": 52.08465608465608,
"alnum_prop": 0.4907557903291345,
"repo_name": "exabon/godot",
"id": "c20d8e90f46298314c7cc85558dc4f243cd8a9d6",
"size": "9844",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "platform/iphone/detect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "50004"
},
{
"name": "C++",
"bytes": "17000821"
},
{
"name": "HTML",
"bytes": "10302"
},
{
"name": "Java",
"bytes": "497034"
},
{
"name": "Makefile",
"bytes": "451"
},
{
"name": "Objective-C",
"bytes": "2644"
},
{
"name": "Objective-C++",
"bytes": "146830"
},
{
"name": "Python",
"bytes": "268690"
},
{
"name": "Shell",
"bytes": "11105"
}
],
"symlink_target": ""
}
|
"""
ulid/time/time_ns
~~~~~~~~~~~~~~~~~
Implements nanosecond time provider using :func:`~time.time_ns`.
"""
import time
from ... import hints
from . import base
class Provider(base.Provider):
"""
Returns time values from :func:`~time.time_ns`.
This class will only work on python 3.7+.
"""
def milliseconds(self) -> hints.Int:
"""
Get the current time since unix epoch in milliseconds.
:return: Epoch timestamp in milliseconds.
:rtype: :class:`~int`
"""
return time.time_ns() // 1000000
def microseconds(self) -> hints.Int:
"""
Get the current time since unix epoch in microseconds.
:return: Epoch timestamp in microseconds.
:rtype: :class:`~int`
"""
return time.time_ns() // 1000
|
{
"content_hash": "24d702b78d924cf59a11c328a9695938",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 68,
"avg_line_length": 23.485714285714284,
"alnum_prop": 0.5790754257907542,
"repo_name": "ahawker/ulid",
"id": "345389e7cd286420b8cdb90138d587af62d28939",
"size": "822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ulid/providers/time/nanosecond.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3387"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "93605"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
from gyp.common import GypError
try:
# Python 2
string_types = basestring
except NameError:
# Python 3
string_types = str
# Default debug modes for GYP
debug = {}
# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message, *args):
if 'all' in gyp.debug or mode in gyp.debug:
ctx = ('unknown', 0, 'unknown')
try:
f = traceback.extract_stack(limit=2)
if f:
ctx = f[0][:3]
except:
pass
if args:
message %= args
print('%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
ctx[1], ctx[2], message))
def FindBuildFiles():
extension = '.gyp'
files = os.listdir(os.getcwd())
build_files = []
for file in files:
if file.endswith(extension):
build_files.append(file)
return build_files
def Load(build_files, format, default_variables={},
includes=[], depth='.', params=None, check=False,
circular_check=True, duplicate_basename_check=True):
"""
Loads one or more specified build files.
default_variables and includes will be copied before use.
Returns the generator for the specified format and the
data returned by loading the specified build files.
"""
if params is None:
params = {}
if '-' in format:
format, params['flavor'] = format.split('-', 1)
default_variables = copy.copy(default_variables)
# Default variables provided by this program and its modules should be
# named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
# avoiding collisions with user and automatic variables.
default_variables['GENERATOR'] = format
default_variables['GENERATOR_FLAVOR'] = params.get('flavor', '')
# Format can be a custom python file, or by default the name of a module
# within gyp.generator.
if format.endswith('.py'):
generator_name = os.path.splitext(format)[0]
path, generator_name = os.path.split(generator_name)
# Make sure the path to the custom generator is in sys.path
# Don't worry about removing it once we are done. Keeping the path
# to each generator that is used in sys.path is likely harmless and
# arguably a good idea.
path = os.path.abspath(path)
if path not in sys.path:
sys.path.insert(0, path)
else:
generator_name = 'gyp.generator.' + format
# These parameters are passed in order (as opposed to by key)
# because ActivePython cannot handle key parameters to __import__.
generator = __import__(generator_name, globals(), locals(), generator_name)
for (key, val) in generator.generator_default_variables.items():
default_variables.setdefault(key, val)
# Give the generator the opportunity to set additional variables based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateVariables', None):
generator.CalculateVariables(default_variables, params)
# Give the generator the opportunity to set generator_input_info based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateGeneratorInputInfo', None):
generator.CalculateGeneratorInputInfo(params)
# Fetch the generator specific info that gets fed to input, we use getattr
# so we can default things and the generators only have to provide what
# they need.
generator_input_info = {
'non_configuration_keys':
getattr(generator, 'generator_additional_non_configuration_keys', []),
'path_sections':
getattr(generator, 'generator_additional_path_sections', []),
'extra_sources_for_rules':
getattr(generator, 'generator_extra_sources_for_rules', []),
'generator_supports_multiple_toolsets':
getattr(generator, 'generator_supports_multiple_toolsets', False),
'generator_wants_static_library_dependencies_adjusted':
getattr(generator,
'generator_wants_static_library_dependencies_adjusted', True),
'generator_wants_sorted_dependencies':
getattr(generator, 'generator_wants_sorted_dependencies', False),
'generator_filelist_paths':
getattr(generator, 'generator_filelist_paths', None),
}
# Process the input specific to this generator.
result = gyp.input.Load(build_files, default_variables, includes[:],
depth, generator_input_info, check, circular_check,
duplicate_basename_check,
params['parallel'], params['root_targets'])
return [generator] + result
def NameValueListToDict(name_value_list):
"""
Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
of the pairs. If a string is simply NAME, then the value in the dictionary
is set to True. If VALUE can be converted to an integer, it is.
"""
result = { }
for item in name_value_list:
tokens = item.split('=', 1)
if len(tokens) == 2:
# If we can make it an int, use that, otherwise, use the string.
try:
token_value = int(tokens[1])
except ValueError:
token_value = tokens[1]
# Set the variable to the supplied value.
result[tokens[0]] = token_value
else:
# No value supplied, treat it as a boolean and set it.
result[tokens[0]] = True
return result
def ShlexEnv(env_name):
flags = os.environ.get(env_name, [])
if flags:
flags = shlex.split(flags)
return flags
def FormatOpt(opt, value):
if opt.startswith('--'):
return '%s=%s' % (opt, value)
return opt + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
"""Regenerate a list of command line flags, for an option of action='append'.
The |env_name|, if given, is checked in the environment and used to generate
an initial list of options, then the options that were specified on the
command line (given in |values|) are appended. This matches the handling of
environment variables and command line flags where command line flags override
the environment, while not requiring the environment to be set when the flags
are used again.
"""
flags = []
if options.use_environment and env_name:
for flag_value in ShlexEnv(env_name):
value = FormatOpt(flag, predicate(flag_value))
if value in flags:
flags.remove(value)
flags.append(value)
if values:
for flag_value in values:
flags.append(FormatOpt(flag, predicate(flag_value)))
return flags
def RegenerateFlags(options):
"""Given a parsed options object, and taking the environment variables into
account, returns a list of flags that should regenerate an equivalent options
object (even in the absence of the environment variables.)
Any path options will be normalized relative to depth.
The format flag is not included, as it is assumed the calling generator will
set that as appropriate.
"""
def FixPath(path):
path = gyp.common.FixIfRelativePath(path, options.depth)
if not path:
return os.path.curdir
return path
def Noop(value):
return value
# We always want to ignore the environment when regenerating, to avoid
# duplicate or changed flags in the environment at the time of regeneration.
flags = ['--ignore-environment']
for name, metadata in options._regeneration_metadata.items():
opt = metadata['opt']
value = getattr(options, name)
value_predicate = metadata['type'] == 'path' and FixPath or Noop
action = metadata['action']
env_name = metadata['env_name']
if action == 'append':
flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
env_name, options))
elif action in ('store', None): # None is a synonym for 'store'.
if value:
flags.append(FormatOpt(opt, value_predicate(value)))
elif options.use_environment and env_name and os.environ.get(env_name):
flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
elif action in ('store_true', 'store_false'):
if ((action == 'store_true' and value) or
(action == 'store_false' and not value)):
flags.append(opt)
elif options.use_environment and env_name:
print('Warning: environment regeneration unimplemented '
'for %s flag %r env_name %r' % (action, opt,
env_name), file=sys.stderr)
else:
print('Warning: regeneration unimplemented for action %r '
'flag %r' % (action, opt), file=sys.stderr)
return flags
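# Illustrative output of RegenerateFlags (a sketch; the exact list depends on
# which options were parsed and on dict ordering): for an invocation that only
# passed '--depth=.' and '-Dfoo=1', the regenerated flags would look like
#
#   ['--ignore-environment', '--depth=.', '-Dfoo=1']
#
# with any path-typed option re-expressed relative to options.depth by FixPath.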
class RegeneratableOptionParser(optparse.OptionParser):
def __init__(self):
self.__regeneratable_options = {}
optparse.OptionParser.__init__(self)
def add_option(self, *args, **kw):
"""Add an option to the parser.
This accepts the same arguments as OptionParser.add_option, plus the
following:
regenerate: can be set to False to prevent this option from being included
in regeneration.
env_name: name of environment variable that additional values for this
option come from.
type: adds type='path', to tell the regenerator that the values of
this option need to be made relative to options.depth
"""
env_name = kw.pop('env_name', None)
if 'dest' in kw and kw.pop('regenerate', True):
dest = kw['dest']
# The path type is needed for regenerating, for optparse we can just treat
# it as a string.
type = kw.get('type')
if type == 'path':
kw['type'] = 'string'
self.__regeneratable_options[dest] = {
'action': kw.get('action'),
'type': type,
'env_name': env_name,
'opt': args[0],
}
optparse.OptionParser.add_option(self, *args, **kw)
def parse_args(self, *args):
values, args = optparse.OptionParser.parse_args(self, *args)
values._regeneration_metadata = self.__regeneratable_options
return values, args
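# Illustrative regeneration metadata (a sketch of what add_option records):
# for the '--depth' option registered below, the parser's regeneratable-options
# map would hold, under the 'depth' key,
#
#   {'action': None, 'type': 'path', 'env_name': None, 'opt': '--depth'}
#
# while optparse itself is handed type='string'.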
def gyp_main(args):
my_name = os.path.basename(sys.argv[0])
parser = RegeneratableOptionParser()
usage = 'usage: %s [options ...] [build_file ...]'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('--build', dest='configs', action='append',
help='configuration for build after project generation')
parser.add_option('--check', dest='check', action='store_true',
help='check format of gyp files')
parser.add_option('--config-dir', dest='config_dir', action='store',
env_name='GYP_CONFIG_DIR', default=None,
help='The location for configuration files like '
'include.gypi.')
parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
action='append', default=[], help='turn on a debugging '
'mode for debugging GYP. Supported modes are "variables", '
'"includes" and "general" or "all" for all of them.')
parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
env_name='GYP_DEFINES',
help='sets variable VAR to value VAL')
parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
help='set DEPTH gyp variable to a relative path to PATH')
parser.add_option('-f', '--format', dest='formats', action='append',
env_name='GYP_GENERATORS', regenerate=False,
help='output formats to generate')
parser.add_option('-G', dest='generator_flags', action='append', default=[],
metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
help='sets generator flag FLAG to VAL')
parser.add_option('--generator-output', dest='generator_output',
action='store', default=None, metavar='DIR', type='path',
env_name='GYP_GENERATOR_OUTPUT',
help='puts generated build files under DIR')
parser.add_option('--ignore-environment', dest='use_environment',
action='store_false', default=True, regenerate=False,
help='do not read options from environment variables')
parser.add_option('-I', '--include', dest='includes', action='append',
metavar='INCLUDE', type='path',
help='files to include in all loaded .gyp files')
# --no-circular-check disables the check for circular relationships between
# .gyp files. These relationships should not exist, but they've only been
# observed to be harmful with the Xcode generator. Chromium's .gyp files
# currently have some circular relationships on non-Mac platforms, so this
# option allows the strict behavior to be used on Macs and the lenient
# behavior to be used elsewhere.
# TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
parser.add_option('--no-circular-check', dest='circular_check',
action='store_false', default=True, regenerate=False,
help="don't check for circular relationships between files")
# --no-duplicate-basename-check disables the check for duplicate basenames
# in a static_library/shared_library project. Visual C++ 2008 generator
# doesn't support this configuration. Libtool on Mac also generates warnings
# when duplicate basenames are passed into Make generator on Mac.
# TODO(yukawa): Remove this option when these legacy generators are
# deprecated.
parser.add_option('--no-duplicate-basename-check',
dest='duplicate_basename_check', action='store_false',
default=True, regenerate=False,
help="don't check for duplicate basenames")
parser.add_option('--no-parallel', action='store_true', default=False,
help='Disable multiprocessing')
parser.add_option('-S', '--suffix', dest='suffix', default='',
help='suffix to add to generated files')
parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
default=None, metavar='DIR', type='path',
help='directory to use as the root of the source tree')
parser.add_option('-R', '--root-target', dest='root_targets',
action='append', metavar='TARGET',
help='include only TARGET and its deep dependencies')
options, build_files_arg = parser.parse_args(args)
build_files = build_files_arg
# Set up the configuration directory (defaults to ~/.gyp)
if not options.config_dir:
home = None
home_dot_gyp = None
if options.use_environment:
home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None)
if home_dot_gyp:
home_dot_gyp = os.path.expanduser(home_dot_gyp)
if not home_dot_gyp:
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
for home_var in home_vars:
home = os.getenv(home_var)
        if home is not None:
home_dot_gyp = os.path.join(home, '.gyp')
if not os.path.exists(home_dot_gyp):
home_dot_gyp = None
else:
break
else:
home_dot_gyp = os.path.expanduser(options.config_dir)
if home_dot_gyp and not os.path.exists(home_dot_gyp):
home_dot_gyp = None
if not options.formats:
# If no format was given on the command line, then check the env variable.
generate_formats = []
if options.use_environment:
generate_formats = os.environ.get('GYP_GENERATORS', [])
if generate_formats:
generate_formats = re.split(r'[\s,]', generate_formats)
if generate_formats:
options.formats = generate_formats
else:
# Nothing in the variable, default based on platform.
if sys.platform == 'darwin':
options.formats = ['xcode']
elif sys.platform in ('win32', 'cygwin'):
options.formats = ['msvs']
else:
options.formats = ['make']
if not options.generator_output and options.use_environment:
g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
if g_o:
options.generator_output = g_o
options.parallel = not options.no_parallel
for mode in options.debug:
gyp.debug[mode] = 1
# Do an extra check to avoid work when we're not debugging.
if DEBUG_GENERAL in gyp.debug:
DebugOutput(DEBUG_GENERAL, 'running with these options:')
for option, value in sorted(options.__dict__.items()):
if option[0] == '_':
continue
if isinstance(value, string_types):
DebugOutput(DEBUG_GENERAL, " %s: '%s'", option, value)
else:
DebugOutput(DEBUG_GENERAL, " %s: %s", option, value)
if not build_files:
build_files = FindBuildFiles()
if not build_files:
raise GypError((usage + '\n\n%s: error: no build_file') %
(my_name, my_name))
# TODO(mark): Chromium-specific hack!
# For Chromium, the gyp "depth" variable should always be a relative path
# to Chromium's top-level "src" directory. If no depth variable was set
# on the command line, try to find a "src" directory by looking at the
# absolute path to each build file's directory. The first "src" component
# found will be treated as though it were the path used for --depth.
if not options.depth:
for build_file in build_files:
build_file_dir = os.path.abspath(os.path.dirname(build_file))
build_file_dir_components = build_file_dir.split(os.path.sep)
components_len = len(build_file_dir_components)
for index in range(components_len - 1, -1, -1):
if build_file_dir_components[index] == 'src':
options.depth = os.path.sep.join(build_file_dir_components)
break
del build_file_dir_components[index]
# If the inner loop found something, break without advancing to another
# build file.
if options.depth:
break
if not options.depth:
    raise GypError('Could not automatically locate src directory. This is '
                   'a temporary Chromium feature that will be removed. Use '
                   '--depth as a workaround.')
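  # Illustrative depth detection (a sketch; the path below is a placeholder):
  # for a build file at /home/user/chromium/src/net/net.gyp the loop above
  # walks the components from the end, drops 'net', finds 'src', and sets
  # options.depth to '/home/user/chromium/src'.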
# If toplevel-dir is not set, we assume that depth is the root of our source
# tree.
if not options.toplevel_dir:
options.toplevel_dir = options.depth
# -D on the command line sets variable defaults - D isn't just for define,
# it's for default. Perhaps there should be a way to force (-F?) a
# variable's value so that it can't be overridden by anything else.
cmdline_default_variables = {}
defines = []
if options.use_environment:
defines += ShlexEnv('GYP_DEFINES')
if options.defines:
defines += options.defines
cmdline_default_variables = NameValueListToDict(defines)
if DEBUG_GENERAL in gyp.debug:
DebugOutput(DEBUG_GENERAL,
"cmdline_default_variables: %s", cmdline_default_variables)
# Set up includes.
includes = []
# If ~/.gyp/include.gypi exists, it'll be forcibly included into every
# .gyp file that's loaded, before anything else is included.
  if home_dot_gyp is not None:
default_include = os.path.join(home_dot_gyp, 'include.gypi')
if os.path.exists(default_include):
print('Using overrides found in ' + default_include)
includes.append(default_include)
# Command-line --include files come after the default include.
if options.includes:
includes.extend(options.includes)
# Generator flags should be prefixed with the target generator since they
# are global across all generator runs.
gen_flags = []
if options.use_environment:
gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
if options.generator_flags:
gen_flags += options.generator_flags
generator_flags = NameValueListToDict(gen_flags)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags)
# Generate all requested formats (use a set in case we got one format request
# twice)
for format in set(options.formats):
params = {'options': options,
'build_files': build_files,
'generator_flags': generator_flags,
'cwd': os.getcwd(),
'build_files_arg': build_files_arg,
'gyp_binary': sys.argv[0],
'home_dot_gyp': home_dot_gyp,
'parallel': options.parallel,
'root_targets': options.root_targets,
'target_arch': cmdline_default_variables.get('target_arch', '')}
# Start with the default variables from the command line.
[generator, flat_list, targets, data] = Load(
build_files, format, cmdline_default_variables, includes, options.depth,
params, options.check, options.circular_check,
options.duplicate_basename_check)
# TODO(mark): Pass |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
# NOTE: flat_list is the flattened dependency graph specifying the order
# that targets may be built. Build systems that operate serially or that
# need to have dependencies defined before dependents reference them should
# generate targets in the order specified in flat_list.
generator.GenerateOutput(flat_list, targets, data, params)
if options.configs:
valid_configs = targets[flat_list[0]]['configurations'].keys()
for conf in options.configs:
if conf not in valid_configs:
raise GypError('Invalid config specified via --build: %s' % conf)
generator.PerformBuild(data, options.configs, params)
# Done
return 0
def main(args):
try:
return gyp_main(args)
except GypError as e:
sys.stderr.write("gyp: %s\n" % e)
return 1
# NOTE: setuptools generated console_scripts calls function with no arguments
def script_main():
return main(sys.argv[1:])
if __name__ == '__main__':
sys.exit(script_main())
|
{
"content_hash": "694b4e3d7fb11719cc559d75aec9253d",
"timestamp": "",
"source": "github",
"line_count": 551,
"max_line_length": 88,
"avg_line_length": 40.1978221415608,
"alnum_prop": 0.6528962932863787,
"repo_name": "refack/node-gyp",
"id": "d5fa9ecf501845e12495d46fbf6a2f55c2ca1725",
"size": "22329",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gyp/pylib/gyp/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "397"
},
{
"name": "C#",
"bytes": "9566"
},
{
"name": "C++",
"bytes": "1475"
},
{
"name": "Emacs Lisp",
"bytes": "14357"
},
{
"name": "JavaScript",
"bytes": "116850"
},
{
"name": "Python",
"bytes": "1246997"
},
{
"name": "Shell",
"bytes": "7180"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers as ser
from rest_framework import exceptions
from modularodm import Q
from modularodm.exceptions import ValidationValueError
from framework.auth.core import Auth
from framework.exceptions import PermissionsError
from django.conf import settings
from website.project.metadata.schemas import ACTIVE_META_SCHEMAS, LATEST_SCHEMA_VERSION
from website.project.metadata.utils import is_prereg_admin_not_project_admin
from website.models import Node, User, Comment, Institution, MetaSchema, DraftRegistration
from website.exceptions import NodeStateError
from website.util import permissions as osf_permissions
from website.project.model import NodeUpdateError
from api.base.utils import get_user_auth, get_object_or_error, absolute_reverse
from api.base.serializers import (JSONAPISerializer, WaterbutlerLink, NodeFileHyperLinkField, IDField, TypeField,
TargetTypeField, JSONAPIListField, LinksField, RelationshipField,
HideIfRegistration, RestrictedDictSerializer,
JSONAPIRelationshipSerializer, relationship_diff)
from api.base.exceptions import InvalidModelValueError, RelationshipPostMakesNoChanges
class NodeTagField(ser.Field):
def to_representation(self, obj):
if obj is not None:
return obj._id
return None
def to_internal_value(self, data):
return data
class NodeLicenseSerializer(RestrictedDictSerializer):
copyright_holders = ser.ListField(allow_empty=True, read_only=True)
year = ser.CharField(allow_blank=True, read_only=True)
class NodeSerializer(JSONAPISerializer):
# TODO: If we have to redo this implementation in any of the other serializers, subclass ChoiceField and make it
# handle blank choices properly. Currently DRF ChoiceFields ignore blank options, which is incorrect in this
# instance
filterable_fields = frozenset([
'id',
'title',
'description',
'public',
'tags',
'category',
'date_created',
'date_modified',
'root',
'parent',
'contributors'
])
non_anonymized_fields = [
'id',
'title',
'description',
'category',
'date_created',
'date_modified',
'registration',
'tags',
'public',
'license',
'links',
'children',
'comments',
'contributors',
'files',
'node_links',
'parent',
'root',
'logs',
'wikis'
]
id = IDField(source='_id', read_only=True)
type = TypeField()
category_choices = settings.NODE_CATEGORY_MAP.items()
category_choices_string = ', '.join(["'{}'".format(choice[0]) for choice in category_choices])
title = ser.CharField(required=True)
description = ser.CharField(required=False, allow_blank=True, allow_null=True)
category = ser.ChoiceField(choices=category_choices, help_text='Choices: ' + category_choices_string)
date_created = ser.DateTimeField(read_only=True)
date_modified = ser.DateTimeField(read_only=True)
registration = ser.BooleanField(read_only=True, source='is_registration')
fork = ser.BooleanField(read_only=True, source='is_fork')
collection = ser.BooleanField(read_only=True, source='is_collection')
tags = JSONAPIListField(child=NodeTagField(), required=False)
node_license = NodeLicenseSerializer(read_only=True, required=False)
template_from = ser.CharField(required=False, allow_blank=False, allow_null=False,
help_text='Specify a node id for a node you would like to use as a template for the '
'new node. Templating is like forking, except that you do not copy the '
'files, only the project structure. Some information is changed on the top '
'level project by submitting the appropriate fields in the request body, '
'and some information will not change. By default, the description will '
'be cleared and the project will be made private.')
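    # Illustrative JSON:API payload exercising 'template_from' (a hedged
    # sketch; the node id 'abc12' and the attribute values are placeholders):
    #   {"data": {"type": "nodes",
    #             "attributes": {"title": "Templated project",
    #                            "category": "project",
    #                            "template_from": "abc12"}}}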
current_user_permissions = ser.SerializerMethodField(help_text='List of strings representing the permissions '
'for the current user on this node.')
# Public is only write-able by admins--see update method
public = ser.BooleanField(source='is_public', required=False,
help_text='Nodes that are made public will give read-only access '
'to everyone. Private nodes require explicit read '
'permission. Write and admin access are the same for '
'public and private nodes. Administrators on a parent '
'node have implicit read permissions for all child nodes')
links = LinksField({'html': 'get_absolute_html_url'})
# TODO: When we have osf_permissions.ADMIN permissions, make this writable for admins
license = RelationshipField(
related_view='licenses:license-detail',
related_view_kwargs={'license_id': '<node_license.node_license._id>'},
)
children = RelationshipField(
related_view='nodes:node-children',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_node_count'},
)
comments = RelationshipField(
related_view='nodes:node-comments',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'unread': 'get_unread_comments_count'})
contributors = RelationshipField(
related_view='nodes:node-contributors',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_contrib_count'},
)
files = RelationshipField(
related_view='nodes:node-providers',
related_view_kwargs={'node_id': '<pk>'}
)
wikis = RelationshipField(
related_view='nodes:node-wikis',
related_view_kwargs={'node_id': '<pk>'}
)
forked_from = RelationshipField(
related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
related_view_kwargs={'node_id': '<forked_from_id>'}
)
forks = RelationshipField(
related_view='nodes:node-forks',
related_view_kwargs={'node_id': '<pk>'}
)
node_links = RelationshipField(
related_view='nodes:node-pointers',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_pointers_count'},
)
parent = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node'
)
draft_registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-draft-registrations',
related_view_kwargs={'node_id': '<pk>'}
))
registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-registrations',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_registration_count'}
))
affiliated_institutions = RelationshipField(
related_view='nodes:node-institutions',
related_view_kwargs={'node_id': '<pk>'},
self_view='nodes:node-relationships-institutions',
self_view_kwargs={'node_id': '<pk>'}
)
root = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<root._id>'}
)
logs = RelationshipField(
related_view='nodes:node-logs',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_logs_count'}
)
def get_current_user_permissions(self, obj):
user = self.context['request'].user
if user.is_anonymous():
return ['read']
permissions = obj.get_permissions(user=user)
if not permissions:
permissions = ['read']
return permissions
class Meta:
type_ = 'nodes'
def get_absolute_url(self, obj):
return obj.get_absolute_url()
# TODO: See if we can get the count filters into the filter rather than the serializer.
def get_logs_count(self, obj):
return len(obj.logs)
def get_node_count(self, obj):
auth = get_user_auth(self.context['request'])
nodes = [node for node in obj.nodes if node.can_view(auth) and node.primary and not node.is_deleted]
return len(nodes)
def get_contrib_count(self, obj):
return len(obj.contributors)
def get_registration_count(self, obj):
auth = get_user_auth(self.context['request'])
registrations = [node for node in obj.registrations_all if node.can_view(auth)]
return len(registrations)
def get_pointers_count(self, obj):
return len(obj.nodes_pointer)
def get_unread_comments_count(self, obj):
user = get_user_auth(self.context['request']).user
node_comments = Comment.find_n_unread(user=user, node=obj, page='node')
return {
'node': node_comments
}
def create(self, validated_data):
if 'template_from' in validated_data:
request = self.context['request']
user = request.user
template_from = validated_data.pop('template_from')
template_node = Node.load(key=template_from)
if template_node is None:
raise exceptions.NotFound
if not template_node.has_permission(user, 'read', check_parent=False):
raise exceptions.PermissionDenied
validated_data.pop('creator')
changed_data = {template_from: validated_data}
node = template_node.use_as_template(auth=get_user_auth(request), changes=changed_data)
else:
node = Node(**validated_data)
try:
node.save()
except ValidationValueError as e:
raise InvalidModelValueError(detail=e.message)
return node
def update(self, node, validated_data):
"""Update instance with the validated data. Requires
the request to be in the serializer context.
"""
assert isinstance(node, Node), 'node must be a Node'
auth = get_user_auth(self.context['request'])
old_tags = set([tag._id for tag in node.tags])
if 'tags' in validated_data:
current_tags = set(validated_data.pop('tags', []))
elif self.partial:
current_tags = set(old_tags)
else:
current_tags = set()
for new_tag in (current_tags - old_tags):
node.add_tag(new_tag, auth=auth)
for deleted_tag in (old_tags - current_tags):
node.remove_tag(deleted_tag, auth=auth)
if validated_data:
try:
node.update(validated_data, auth=auth)
except ValidationValueError as e:
raise InvalidModelValueError(detail=e.message)
except PermissionsError:
raise exceptions.PermissionDenied
except NodeUpdateError as e:
raise exceptions.ValidationError(detail=e.reason)
except NodeStateError as e:
raise InvalidModelValueError(detail=e.message)
return node
class NodeDetailSerializer(NodeSerializer):
"""
Overrides NodeSerializer to make id required.
"""
id = IDField(source='_id', required=True)
class NodeForksSerializer(NodeSerializer):
category_choices = settings.NODE_CATEGORY_MAP.items()
category_choices_string = ', '.join(["'{}'".format(choice[0]) for choice in category_choices])
title = ser.CharField(required=False)
category = ser.ChoiceField(read_only=True, choices=category_choices, help_text='Choices: ' + category_choices_string)
forked_date = ser.DateTimeField(read_only=True)
def create(self, validated_data):
node = validated_data.pop('node')
fork_title = validated_data.pop('title', None)
request = self.context['request']
auth = get_user_auth(request)
fork = node.fork_node(auth, title=fork_title)
try:
fork.save()
except ValidationValueError as e:
raise InvalidModelValueError(detail=e.message)
return fork
class NodeContributorsSerializer(JSONAPISerializer):
""" Separate from UserSerializer due to necessity to override almost every field as read only
"""
non_anonymized_fields = ['bibliographic', 'permission']
filterable_fields = frozenset([
'id',
'bibliographic',
'permission'
])
id = IDField(source='_id', required=True)
type = TypeField()
bibliographic = ser.BooleanField(help_text='Whether the user will be included in citations for this node or not.',
default=True)
permission = ser.ChoiceField(choices=osf_permissions.PERMISSIONS, required=False, allow_null=True,
default=osf_permissions.reduce_permissions(osf_permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS),
help_text='User permission level. Must be "read", "write", or "admin". Defaults to "write".')
unregistered_contributor = ser.SerializerMethodField()
links = LinksField({
'self': 'get_absolute_url'
})
users = RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<pk>'},
always_embed=True
)
class Meta:
type_ = 'contributors'
def get_absolute_url(self, obj):
node_id = self.context['request'].parser_context['kwargs']['node_id']
return absolute_reverse(
'nodes:node-contributor-detail',
kwargs={
'node_id': node_id,
'user_id': obj._id
}
)
def get_unregistered_contributor(self, obj):
unclaimed_records = obj.unclaimed_records.get(obj.node_id, None)
if unclaimed_records:
return unclaimed_records.get('name', None)
class NodeContributorsCreateSerializer(NodeContributorsSerializer):
"""
Overrides NodeContributorsSerializer to add target_type field
"""
target_type = TargetTypeField(target_type='users')
def create(self, validated_data):
auth = Auth(self.context['request'].user)
node = self.context['view'].get_node()
contributor = get_object_or_error(User, validated_data['_id'], display_name='user')
# Node object checks for contributor existence but can still change permissions anyway
if contributor in node.contributors:
raise exceptions.ValidationError('{} is already a contributor'.format(contributor.fullname))
bibliographic = validated_data['bibliographic']
permissions = osf_permissions.expand_permissions(validated_data.get('permission')) or osf_permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS
node.add_contributor(contributor=contributor, auth=auth, visible=bibliographic, permissions=permissions, save=True)
contributor.permission = osf_permissions.reduce_permissions(node.get_permissions(contributor))
contributor.bibliographic = node.get_visible(contributor)
contributor.node_id = node._id
return contributor
class NodeContributorDetailSerializer(NodeContributorsSerializer):
"""
Overrides node contributor serializer to add additional methods
"""
def update(self, instance, validated_data):
contributor = instance
auth = Auth(self.context['request'].user)
node = self.context['view'].get_node()
visible = validated_data.get('bibliographic')
permission = validated_data.get('permission')
try:
node.update_contributor(contributor, permission, visible, auth, save=True)
except NodeStateError as e:
raise exceptions.ValidationError(detail=e.message)
except ValueError as e:
raise exceptions.ValidationError(detail=e.message)
contributor.permission = osf_permissions.reduce_permissions(node.get_permissions(contributor))
contributor.bibliographic = node.get_visible(contributor)
contributor.node_id = node._id
return contributor
class NodeLinksSerializer(JSONAPISerializer):
id = IDField(source='_id')
type = TypeField()
target_type = TargetTypeField(target_type='nodes')
# TODO: We don't show the title because the current user may not have access to this node. We may want to conditionally
# include this field in the future.
# title = ser.CharField(read_only=True, source='node.title', help_text='The title of the node that this Node Link '
# 'points to')
target_node = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'},
always_embed=True
)
class Meta:
type_ = 'node_links'
links = LinksField({
'self': 'get_absolute_url'
})
def get_absolute_url(self, obj):
node_id = self.context['request'].parser_context['kwargs']['node_id']
return absolute_reverse(
'nodes:node-pointer-detail',
kwargs={
'node_id': node_id,
'node_link_id': obj._id
}
)
def create(self, validated_data):
request = self.context['request']
user = request.user
auth = Auth(user)
node = self.context['view'].get_node()
target_node_id = validated_data['_id']
pointer_node = Node.load(target_node_id)
if not pointer_node or pointer_node.is_collection:
raise InvalidModelValueError(
source={'pointer': '/data/relationships/node_links/data/id'},
detail='Target Node \'{}\' not found.'.format(target_node_id)
)
try:
pointer = node.add_pointer(pointer_node, auth, save=True)
return pointer
except ValueError:
raise InvalidModelValueError(
source={'pointer': '/data/relationships/node_links/data/id'},
detail='Target Node \'{}\' already pointed to by \'{}\'.'.format(target_node_id, node._id)
)
def update(self, instance, validated_data):
pass
class NodeProviderSerializer(JSONAPISerializer):
id = ser.SerializerMethodField(read_only=True)
kind = ser.CharField(read_only=True)
name = ser.CharField(read_only=True)
path = ser.CharField(read_only=True)
node = ser.CharField(source='node_id', read_only=True)
provider = ser.CharField(read_only=True)
files = NodeFileHyperLinkField(
related_view='nodes:node-files',
related_view_kwargs={'node_id': '<node_id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder',
never_embed=True
)
links = LinksField({
'upload': WaterbutlerLink(),
'new_folder': WaterbutlerLink(kind='folder')
})
class Meta:
type_ = 'files'
@staticmethod
def get_id(obj):
return '{}:{}'.format(obj.node._id, obj.provider)
def get_absolute_url(self, obj):
return absolute_reverse(
'nodes:node-provider-detail',
kwargs={
'node_id': obj.node._id,
'provider': obj.provider
}
)
class InstitutionRelated(JSONAPIRelationshipSerializer):
id = ser.CharField(source='_id', required=False, allow_null=True)
class Meta:
type_ = 'institutions'
class NodeInstitutionsRelationshipSerializer(ser.Serializer):
data = ser.ListField(child=InstitutionRelated())
links = LinksField({'self': 'get_self_url',
'html': 'get_related_url'})
def get_self_url(self, obj):
return obj['self'].institutions_relationship_url()
def get_related_url(self, obj):
return obj['self'].institutions_url()
class Meta:
type_ = 'institutions'
def get_institutions_to_add_remove(self, institutions, new_institutions):
diff = relationship_diff(
current_items={inst._id: inst for inst in institutions},
new_items={inst['_id']: inst for inst in new_institutions}
)
insts_to_add = []
for inst_id in diff['add']:
inst = Institution.load(inst_id)
if not inst:
raise exceptions.NotFound(detail='Institution with id "{}" was not found'.format(inst_id))
insts_to_add.append(inst)
return insts_to_add, diff['remove'].values()
def make_instance_obj(self, obj):
return {
'data': obj.affiliated_institutions,
'self': obj
}
def update(self, instance, validated_data):
node = instance['self']
user = self.context['request'].user
add, remove = self.get_institutions_to_add_remove(
institutions=instance['data'],
new_institutions=validated_data['data']
)
for inst in add:
if inst not in user.affiliated_institutions:
raise exceptions.PermissionDenied(detail='User needs to be affiliated with {}'.format(inst.name))
for inst in remove:
node.remove_affiliated_institution(inst, user)
for inst in add:
node.add_affiliated_institution(inst, user)
node.save()
return self.make_instance_obj(node)
def create(self, validated_data):
instance = self.context['view'].get_object()
user = self.context['request'].user
node = instance['self']
add, remove = self.get_institutions_to_add_remove(
institutions=instance['data'],
new_institutions=validated_data['data']
)
if not len(add):
raise RelationshipPostMakesNoChanges
for inst in add:
if inst not in user.affiliated_institutions:
raise exceptions.PermissionDenied(detail='User needs to be affiliated with {}'.format(inst.name))
for inst in add:
node.add_affiliated_institution(inst, user)
node.save()
return self.make_instance_obj(node)
class NodeAlternativeCitationSerializer(JSONAPISerializer):
id = IDField(source='_id', read_only=True)
type = TypeField()
name = ser.CharField(required=True)
text = ser.CharField(required=True)
class Meta:
type_ = 'citations'
def create(self, validated_data):
errors = self.error_checker(validated_data)
if len(errors) > 0:
raise exceptions.ValidationError(detail=errors)
node = self.context['view'].get_node()
auth = Auth(self.context['request']._user)
citation = node.add_citation(auth, save=True, **validated_data)
return citation
def update(self, instance, validated_data):
errors = self.error_checker(validated_data)
if len(errors) > 0:
raise exceptions.ValidationError(detail=errors)
node = self.context['view'].get_node()
auth = Auth(self.context['request']._user)
instance = node.edit_citation(auth, instance, save=True, **validated_data)
return instance
def error_checker(self, data):
errors = []
name = data.get('name', None)
text = data.get('text', None)
citations = self.context['view'].get_node().alternative_citations
if not (self.instance and self.instance.name == name) and citations.find(Q('name', 'eq', name)).count() > 0:
errors.append("There is already a citation named '{}'".format(name))
if not (self.instance and self.instance.text == text):
matching_citations = citations.find(Q('text', 'eq', text))
if matching_citations.count() > 0:
names = "', '".join([str(citation.name) for citation in matching_citations])
errors.append("Citation matches '{}'".format(names))
return errors
def get_absolute_url(self, obj):
# Citations don't have urls
raise NotImplementedError
class DraftRegistrationSerializer(JSONAPISerializer):
id = IDField(source='_id', read_only=True)
type = TypeField()
registration_supplement = ser.CharField(source='registration_schema._id', required=True)
registration_metadata = ser.DictField(required=False)
datetime_initiated = ser.DateTimeField(read_only=True)
datetime_updated = ser.DateTimeField(read_only=True)
branched_from = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<branched_from._id>'}
)
initiator = RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<initiator._id>'},
)
registration_schema = RelationshipField(
related_view='metaschemas:metaschema-detail',
related_view_kwargs={'metaschema_id': '<registration_schema._id>'}
)
links = LinksField({
'html': 'get_absolute_url'
})
def get_absolute_url(self, obj):
return obj.absolute_url
def create(self, validated_data):
node = validated_data.pop('node')
initiator = validated_data.pop('initiator')
metadata = validated_data.pop('registration_metadata', None)
schema_id = validated_data.pop('registration_schema').get('_id')
schema = get_object_or_error(MetaSchema, schema_id)
if schema.schema_version != LATEST_SCHEMA_VERSION or schema.name not in ACTIVE_META_SCHEMAS:
raise exceptions.ValidationError('Registration supplement must be an active schema.')
draft = DraftRegistration.create_from_node(node=node, user=initiator, schema=schema)
reviewer = is_prereg_admin_not_project_admin(self.context['request'], draft)
if metadata:
try:
# Required fields are only required when creating the actual registration, not updating the draft.
draft.validate_metadata(metadata=metadata, reviewer=reviewer, required_fields=False)
except ValidationValueError as e:
raise exceptions.ValidationError(e.message)
draft.update_metadata(metadata)
draft.save()
return draft
class Meta:
type_ = 'draft_registrations'
class DraftRegistrationDetailSerializer(DraftRegistrationSerializer):
"""
Overrides DraftRegistrationSerializer to make id and registration_metadata required.
registration_supplement cannot be changed after draft has been created.
Also makes registration_supplement read-only.
"""
id = IDField(source='_id', required=True)
registration_metadata = ser.DictField(required=True)
registration_supplement = ser.CharField(read_only=True, source='registration_schema._id')
def update(self, draft, validated_data):
"""
Update draft instance with the validated metadata.
"""
metadata = validated_data.pop('registration_metadata', None)
reviewer = is_prereg_admin_not_project_admin(self.context['request'], draft)
if metadata:
try:
# Required fields are only required when creating the actual registration, not updating the draft.
draft.validate_metadata(metadata=metadata, reviewer=reviewer, required_fields=False)
except ValidationValueError as e:
raise exceptions.ValidationError(e.message)
draft.update_metadata(metadata)
draft.save()
return draft
|
{
"content_hash": "024b1c2433dbdbbdd6ddc4225e6b1921",
"timestamp": "",
"source": "github",
"line_count": 737,
"max_line_length": 141,
"avg_line_length": 37.87516960651289,
"alnum_prop": 0.6255284086838145,
"repo_name": "kwierman/osf.io",
"id": "b62fd76e18c5dad184870b15ee31d5c2f17a10c5",
"size": "27914",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "api/nodes/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "157999"
},
{
"name": "HTML",
"bytes": "110361"
},
{
"name": "JavaScript",
"bytes": "1632320"
},
{
"name": "Mako",
"bytes": "672278"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "5622591"
}
],
"symlink_target": ""
}
|
from openstack.network import network_service
from openstack import resource2 as resource
class Listener(resource.Resource):
resource_key = 'listener'
resources_key = 'listeners'
base_path = '/lbaas/listeners'
service = network_service.NetworkService()
# capabilities
allow_create = True
allow_get = True
allow_update = True
allow_delete = True
allow_list = True
_query_mapping = resource.QueryParameters(
'connection_limit', 'default_pool_id', 'default_tls_container_ref',
'description', 'name', 'project_id', 'protocol', 'protocol_port',
is_admin_state_up='admin_state_up'
)
# Properties
#: The maximum number of connections permitted for this load balancer.
#: Default is infinite.
connection_limit = resource.Body('connection_limit')
#: ID of default pool. Must have compatible protocol with listener.
default_pool_id = resource.Body('default_pool_id')
#: A reference to a container of TLS secrets.
default_tls_container_ref = resource.Body('default_tls_container_ref')
#: Description for the listener.
description = resource.Body('description')
#: The administrative state of the listener, which is up
#: ``True`` or down ``False``. *Type: bool*
is_admin_state_up = resource.Body('admin_state_up', type=bool)
#: List of load balancers associated with this listener.
#: *Type: list of dicts which contain the load balancer IDs*
load_balancer_ids = resource.Body('loadbalancers')
#: Name of the listener
name = resource.Body('name')
#: The ID of the project this listener is associated with.
project_id = resource.Body('project_id')
#: The protocol of the listener, which is TCP, HTTP, HTTPS
#: or TERMINATED_HTTPS.
protocol = resource.Body('protocol')
#: Port the listener will listen to, e.g. 80.
protocol_port = resource.Body('protocol_port')
#: A list of references to TLS secrets.
#: *Type: list*
sni_container_refs = resource.Body('sni_container_refs')
|
{
"content_hash": "2bf7f0b574a5b347ed7e4a91ee465ebe",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 75,
"avg_line_length": 40.21568627450981,
"alnum_prop": 0.681618722574354,
"repo_name": "briancurtin/python-openstacksdk",
"id": "39bf8b1c3f37d44e570cfa409d210593ef7d1b17",
"size": "2597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack/network/v2/listener.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1956816"
},
{
"name": "Shell",
"bytes": "1865"
}
],
"symlink_target": ""
}
|
from pyramid_celery import celery_app as app
from datetime import datetime
@app.task
def get_date(*args, **kwargs):
msg = app.conf['PYRAMID_REGISTRY'].settings['message']
print(msg % datetime.utcnow())
@app.task
def get_date_2(*args, **kwargs):
msg = app.conf['PYRAMID_REGISTRY'].settings['message']
print('crontab: ' + msg % datetime.utcnow())
|
{
"content_hash": "c2a6e5f340a2925b5f6a1eb607e94bd4",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 58,
"avg_line_length": 22.9375,
"alnum_prop": 0.6784741144414169,
"repo_name": "edelooff/pyramid_celery",
"id": "1e1bc018476f232c20f811562a852901714b5fdf",
"size": "367",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/scheduler_example/scheduler_example/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14344"
}
],
"symlink_target": ""
}
|
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = "0.2.0"
if sys.argv[-1] == 'publish':
try:
import wheel
except ImportError:
print('Wheel library missing. Please run "pip install wheel"')
sys.exit()
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
print("Tagging the version on github:")
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='dj-ango',
version=version,
description="""Simplifying the import structure of Django.""",
long_description=readme + '\n\n' + history,
author='Daniel Roy Greenfeld',
author_email='pydanny@gmail.com',
url='https://github.com/pydanny/dj-ango',
packages=[
'ango',
],
include_package_data=True,
install_requires=[
],
license="BSD",
zip_safe=False,
keywords='dj-ango',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
{
"content_hash": "c264b29329543c239f2739bd65d7a584",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 70,
"avg_line_length": 28.35483870967742,
"alnum_prop": 0.5927189988623436,
"repo_name": "pydanny/dj-ango",
"id": "e916fc7a14854badd8caa76078cc3e8af867e908",
"size": "1805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1219"
},
{
"name": "Python",
"bytes": "4048"
}
],
"symlink_target": ""
}
|
"""
Server Density Plugin
Proftpd connection details
https://github.com/serverdensity/sd-agent-plugins/
Author: @bitbeans
Version: 1.0.0
"""
import json
import sys
import subprocess
import platform
import logging
import time
from decimal import *
try:
""" Q&D check if there is a proftpd binary, so count creater: 1 """
c = subprocess.check_output(
"whereis proftpd | awk '{print NF}'", shell=True).strip()
if float(c) < 2:
raise Exception("Missing proftpd")
except Exception:
sys.exit(0)
class Proftpd(object):
def __init__(self, agentConfig, checksLogger, rawConfig):
self.agentConfig = agentConfig
self.checksLogger = checksLogger
self.rawConfig = rawConfig
self.version = platform.python_version_tuple()
def run(self):
data = {}
if platform.system() == 'Linux':
users = []
connections = []
try:
e = subprocess.check_output(
"ftpwho -o oneline | grep '^[ 0-9]'", shell=True).strip()
connections = e.split('\n')
for connection in connections:
tmpuser = connection.split(' ')
if users.count(tmpuser[1]) == 0:
users.append(tmpuser[1])
except subprocess.CalledProcessError:
pass
data = {'connections': len(connections),
'users': len(users)}
else:
            self.checksLogger.error(
'Plugin currently only available on Linux.')
return data
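# Illustrative return value of Proftpd.run() (a sketch; the numbers are
# placeholders): with two ftpwho connection lines belonging to the same user,
# run() returns {'connections': 2, 'users': 1}.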
if __name__ == '__main__':
"""Standalone test"""
raw_agent_config = {
'Proftpd': {
}
}
main_checks_logger = logging.getLogger('Proftpd')
main_checks_logger.setLevel(logging.DEBUG)
main_checks_logger.addHandler(logging.StreamHandler(sys.stdout))
proftpd_check = Proftpd({}, main_checks_logger, raw_agent_config)
while True:
try:
            print(json.dumps(proftpd_check.run(), indent=4, sort_keys=True))
except:
main_checks_logger.exception("Unhandled exception")
finally:
time.sleep(60)
|
{
"content_hash": "3595a4559458b39a60115580f065f06c",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 77,
"avg_line_length": 29.213333333333335,
"alnum_prop": 0.5727978092195345,
"repo_name": "bencer/sd-agent-plugins",
"id": "7a1396607b114bdce14a216b2f2950395b4f21ce",
"size": "2191",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Proftpd/Proftpd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "135454"
},
{
"name": "Shell",
"bytes": "43"
}
],
"symlink_target": ""
}
|
"""Support for Minut Point."""
import asyncio
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_TOKEN, CONF_WEBHOOK_ID
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, async_dispatcher_send)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.dt import as_local, parse_datetime, utc_from_timestamp
from . import config_flow  # noqa pylint: disable=unused-import
from .const import (
CONF_WEBHOOK_URL, DOMAIN, EVENT_RECEIVED, POINT_DISCOVERY_NEW,
SCAN_INTERVAL, SIGNAL_UPDATE_ENTITY, SIGNAL_WEBHOOK)
_LOGGER = logging.getLogger(__name__)
CONF_CLIENT_ID = 'client_id'
CONF_CLIENT_SECRET = 'client_secret'
DATA_CONFIG_ENTRY_LOCK = 'point_config_entry_lock'
CONFIG_ENTRY_IS_SETUP = 'point_config_entry_is_setup'
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN:
vol.Schema({
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
})
},
extra=vol.ALLOW_EXTRA,
)
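# Illustrative YAML configuration accepted by CONFIG_SCHEMA (a sketch; the
# values are placeholders, not real credentials):
#
#   point:
#     client_id: YOUR_CLIENT_ID
#     client_secret: YOUR_CLIENT_SECRET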
async def async_setup(hass, config):
"""Set up the Minut Point component."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
config_flow.register_flow_implementation(
hass, DOMAIN, conf[CONF_CLIENT_ID],
conf[CONF_CLIENT_SECRET])
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={'source': config_entries.SOURCE_IMPORT},
))
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Set up Point from a config entry."""
from pypoint import PointSession
def token_saver(token):
_LOGGER.debug('Saving updated token')
entry.data[CONF_TOKEN] = token
hass.config_entries.async_update_entry(entry, data={**entry.data})
# Force token update.
entry.data[CONF_TOKEN]['expires_in'] = -1
session = PointSession(
entry.data['refresh_args']['client_id'],
token=entry.data[CONF_TOKEN],
auto_refresh_kwargs=entry.data['refresh_args'],
token_saver=token_saver,
)
if not session.is_authorized:
_LOGGER.error('Authentication Error')
return False
hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
hass.data[CONFIG_ENTRY_IS_SETUP] = set()
await async_setup_webhook(hass, entry, session)
client = MinutPointClient(hass, entry, session)
hass.data.setdefault(DOMAIN, {}).update({entry.entry_id: client})
await client.update()
return True
async def async_setup_webhook(hass: HomeAssistantType, entry: ConfigEntry,
session):
"""Set up a webhook to handle binary sensor events."""
if CONF_WEBHOOK_ID not in entry.data:
entry.data[CONF_WEBHOOK_ID] = \
hass.components.webhook.async_generate_id()
entry.data[CONF_WEBHOOK_URL] = \
hass.components.webhook.async_generate_url(
entry.data[CONF_WEBHOOK_ID])
_LOGGER.info('Registering new webhook at: %s',
entry.data[CONF_WEBHOOK_URL])
hass.config_entries.async_update_entry(
entry, data={
**entry.data,
})
await hass.async_add_executor_job(
session.update_webhook,
entry.data[CONF_WEBHOOK_URL],
entry.data[CONF_WEBHOOK_ID],
['*'])
hass.components.webhook.async_register(
DOMAIN, 'Point', entry.data[CONF_WEBHOOK_ID], handle_webhook)
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Unload a config entry."""
hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID])
session = hass.data[DOMAIN].pop(entry.entry_id)
await hass.async_add_executor_job(session.remove_webhook)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
for component in ('binary_sensor', 'sensor'):
await hass.config_entries.async_forward_entry_unload(
entry, component)
return True
async def handle_webhook(hass, webhook_id, request):
"""Handle webhook callback."""
try:
data = await request.json()
_LOGGER.debug("Webhook %s: %s", webhook_id, data)
except ValueError:
return None
if isinstance(data, dict):
data['webhook_id'] = webhook_id
async_dispatcher_send(hass, SIGNAL_WEBHOOK, data, data.get('hook_id'))
hass.bus.async_fire(EVENT_RECEIVED, data)
class MinutPointClient():
"""Get the latest data and update the states."""
def __init__(self, hass: HomeAssistantType, config_entry: ConfigEntry,
session):
"""Initialize the Minut data object."""
self._known_devices = set()
self._known_homes = set()
self._hass = hass
self._config_entry = config_entry
self._is_available = True
self._client = session
async_track_time_interval(self._hass, self.update, SCAN_INTERVAL)
async def update(self, *args):
"""Periodically poll the cloud for current state."""
await self._sync()
async def _sync(self):
"""Update local list of devices."""
if not await self._hass.async_add_executor_job(
self._client.update) and self._is_available:
self._is_available = False
_LOGGER.warning("Device is unavailable")
return
async def new_device(device_id, component):
"""Load new device."""
config_entries_key = '{}.{}'.format(component, DOMAIN)
async with self._hass.data[DATA_CONFIG_ENTRY_LOCK]:
if config_entries_key not in self._hass.data[
CONFIG_ENTRY_IS_SETUP]:
await self._hass.config_entries.async_forward_entry_setup(
self._config_entry, component)
self._hass.data[CONFIG_ENTRY_IS_SETUP].add(
config_entries_key)
async_dispatcher_send(
self._hass, POINT_DISCOVERY_NEW.format(component, DOMAIN),
device_id)
self._is_available = True
for home_id in self._client.homes:
if home_id not in self._known_homes:
await new_device(home_id, 'alarm_control_panel')
self._known_homes.add(home_id)
for device in self._client.devices:
if device.device_id not in self._known_devices:
for component in ('sensor', 'binary_sensor'):
await new_device(device.device_id, component)
self._known_devices.add(device.device_id)
async_dispatcher_send(self._hass, SIGNAL_UPDATE_ENTITY)
def device(self, device_id):
"""Return device representation."""
return self._client.device(device_id)
def is_available(self, device_id):
"""Return device availability."""
return device_id in self._client.device_ids
def remove_webhook(self):
"""Remove the session webhook."""
return self._client.remove_webhook()
@property
def homes(self):
"""Return known homes."""
return self._client.homes
def alarm_disarm(self, home_id):
"""Send alarm disarm command."""
return self._client.alarm_disarm(home_id)
def alarm_arm(self, home_id):
"""Send alarm arm command."""
return self._client.alarm_arm(home_id)
class MinutPointEntity(Entity):
"""Base Entity used by the sensors."""
def __init__(self, point_client, device_id, device_class):
"""Initialize the entity."""
self._async_unsub_dispatcher_connect = None
self._client = point_client
self._id = device_id
self._name = self.device.name
self._device_class = device_class
self._updated = utc_from_timestamp(0)
self._value = None
def __str__(self):
"""Return string representation of device."""
return "MinutPoint {}".format(self.name)
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
_LOGGER.debug('Created device %s', self)
self._async_unsub_dispatcher_connect = async_dispatcher_connect(
self.hass, SIGNAL_UPDATE_ENTITY, self._update_callback)
await self._update_callback()
async def async_will_remove_from_hass(self):
"""Disconnect dispatcher listener when removed."""
if self._async_unsub_dispatcher_connect:
self._async_unsub_dispatcher_connect()
async def _update_callback(self):
"""Update the value of the sensor."""
pass
@property
def available(self):
"""Return true if device is not offline."""
return self._client.is_available(self.device_id)
@property
def device(self):
"""Return the representation of the device."""
return self._client.device(self.device_id)
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def device_id(self):
"""Return the id of the device."""
return self._id
@property
def device_state_attributes(self):
"""Return status of device."""
attrs = self.device.device_status
attrs['last_heard_from'] = \
as_local(self.last_update).strftime("%Y-%m-%d %H:%M:%S")
return attrs
@property
def device_info(self):
"""Return a device description for device registry."""
device = self.device.device
return {
'connections': {('mac', device['device_mac'])},
            'identifiers': {(DOMAIN, device['device_id'])},
'manufacturer': 'Minut',
'model': 'Point v{}'.format(device['hardware_version']),
'name': device['description'],
'sw_version': device['firmware']['installed'],
'via_hub': (DOMAIN, device['home']),
}
@property
def name(self):
"""Return the display name of this device."""
return "{} {}".format(self._name, self.device_class.capitalize())
@property
def is_updated(self):
"""Return true if sensor have been updated."""
return self.last_update > self._updated
@property
def last_update(self):
"""Return the last_update time for the device."""
last_update = parse_datetime(self.device.last_update)
return last_update
@property
def should_poll(self):
"""No polling needed for point."""
return False
@property
def unique_id(self):
"""Return the unique id of the sensor."""
return 'point.{}-{}'.format(self._id, self.device_class)
@property
def value(self):
"""Return the sensor value."""
return self._value
|
{
"content_hash": "93a6f1713cb0d3e607dda9217379852c",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 78,
"avg_line_length": 33.03283582089552,
"alnum_prop": 0.6169347551057293,
"repo_name": "molobrakos/home-assistant",
"id": "c0b2f7acd0fcd0e09ad231fe7730bee2a038f93d",
"size": "11066",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "homeassistant/components/point/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "407"
},
{
"name": "Python",
"bytes": "15057917"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
"""
Box.net OAuth2 backend, docs at:
https://python-social-auth.readthedocs.io/en/latest/backends/box.html
"""
from .oauth import BaseOAuth2
class BoxOAuth2(BaseOAuth2):
"""Box.net OAuth authentication backend"""
name = "box"
AUTHORIZATION_URL = "https://www.box.com/api/oauth2/authorize"
ACCESS_TOKEN_METHOD = "POST"
ACCESS_TOKEN_URL = "https://www.box.com/api/oauth2/token"
REVOKE_TOKEN_URL = "https://www.box.com/api/oauth2/revoke"
SCOPE_SEPARATOR = ","
EXTRA_DATA = [
("refresh_token", "refresh_token", True),
("id", "id"),
("expires", "expires"),
]
def do_auth(self, access_token, response=None, *args, **kwargs):
response = response or {}
data = self.user_data(access_token)
data["access_token"] = response.get("access_token")
data["refresh_token"] = response.get("refresh_token")
data["expires"] = response.get("expires_in")
kwargs.update({"backend": self, "response": data})
return self.strategy.authenticate(*args, **kwargs)
def get_user_details(self, response):
"""Return user details Box.net account"""
fullname, first_name, last_name = self.get_user_names(response.get("name"))
return {
"username": response.get("login"),
"email": response.get("login") or "",
"fullname": fullname,
"first_name": first_name,
"last_name": last_name,
}
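    # Illustrative mapping performed by get_user_details (a sketch; the Box
    # response values are placeholders): a response like
    #   {'login': 'jane@example.com', 'name': 'Jane Doe'}
    # would yield {'username': 'jane@example.com', 'email': 'jane@example.com',
    #              'fullname': 'Jane Doe', 'first_name': 'Jane',
    #              'last_name': 'Doe'}.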
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
params = self.setting("PROFILE_EXTRA_PARAMS", {})
params["access_token"] = access_token
return self.get_json("https://api.box.com/2.0/users/me", params=params)
def refresh_token(self, token, *args, **kwargs):
params = self.refresh_token_params(token, *args, **kwargs)
request = self.request(
self.REFRESH_TOKEN_URL or self.ACCESS_TOKEN_URL,
data=params,
headers=self.auth_headers(),
method="POST",
)
return self.process_refresh_token_response(request, *args, **kwargs)
|
{
"content_hash": "c78cbe7f0023a0d3b32fa084e43b13e2",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 83,
"avg_line_length": 37.06896551724138,
"alnum_prop": 0.5995348837209302,
"repo_name": "python-social-auth/social-core",
"id": "2fc86dd285a752049f4c069642a4ab8c3fca6bb0",
"size": "2150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "social_core/backends/box.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "958"
},
{
"name": "Makefile",
"bytes": "316"
},
{
"name": "Python",
"bytes": "807862"
},
{
"name": "Shell",
"bytes": "1923"
}
],
"symlink_target": ""
}
|
import unittest
from fbmq import template as Template
from fbmq import utils
class TemplateTest(unittest.TestCase):
def test_button_web(self):
btn = Template.ButtonWeb(title="title", url="https://test.com")
self.assertEquals('{"title": "title", "type": "web_url", "url": "https://test.com"}', utils.to_json(btn))
print(utils.to_json(btn))
def test_button_postback(self):
btn = Template.ButtonPostBack(title="title", payload="TEST_PAYLOAD")
self.assertEquals('{"payload": "TEST_PAYLOAD", "title": "title", "type": "postback"}', utils.to_json(btn))
print(utils.to_json(btn))
def test_button_phone(self):
btn = Template.ButtonPhoneNumber(title="title", payload="+82108011")
self.assertEquals('{"payload": "+82108011", "title": "title", "type": "phone_number"}', utils.to_json(btn))
print(utils.to_json(btn))
def test_buttons(self):
btns1 = Template.Buttons(text="Title", buttons=[
{'type': 'web_url', 'title': 'title', 'value': 'https://test.com'},
{'type': 'postback', 'title': 'title', 'value': 'TEST_PAYLOAD'},
{'type': 'phone_number', 'title': 'title', 'value': '+82108011'},
])
btns2 = Template.Buttons(text="Title", buttons=[
Template.ButtonWeb(title="title", url="https://test.com"),
Template.ButtonPostBack(title="title", payload="TEST_PAYLOAD"),
Template.ButtonPhoneNumber(title="title", payload="+82108011")
])
self.assertEquals(utils.to_json(btns1), utils.to_json(btns2))
def test_button_shortcut(self):
btns = Template.Buttons.convert_shortcut_buttons([
{'type': 'web_url', 'title': 'title', 'value': 'https://test.com'},
{'type': 'postback', 'title': 'title', 'value': 'TEST_PAYLOAD'},
{'type': 'phone_number', 'title': 'title', 'value': '+82108011'},
Template.ButtonWeb(title="title", url="https://test.com"),
])
self.assertEquals('[{"title": "title", "type": "web_url", "url": "https://test.com"},'
' {"payload": "TEST_PAYLOAD", "title": "title", "type": "postback"},'
' {"payload": "+82108011", "title": "title", "type": "phone_number"},'
' {"title": "title", "type": "web_url", "url": "https://test.com"}]', utils.to_json(btns))
with self.assertRaises(ValueError) as context:
Template.Buttons.convert_shortcut_buttons([{'type': 'url', 'title': 'title', 'value': 'https://test.com'}])
with self.assertRaises(ValueError) as context:
Template.Buttons.convert_shortcut_buttons(['hello'])
self.assertEquals(None, Template.Buttons.convert_shortcut_buttons(None))
def test_generic(self):
generic = Template.Generic(
elements=[Template.GenericElement(title='generic', subtitle='subtitle', item_url='https://test.com',
image_url='https://test.com/img',
buttons=[
{'type': 'web_url', 'title': 'title', 'value': 'https://test.com'}])])
self.assertEquals(
'{"payload": {"elements": [{"buttons": [{"title": "title", "type": "web_url", "url": "https://test.com"}],'
' "image_url": "https://test.com/img", "item_url": "https://test.com", "subtitle": "subtitle",'
' "title": "generic"}], "template_type": "generic"}, "type": "template"}', utils.to_json(generic))
def test_account_link(self):
link = Template.AccountLink(text="title", account_link_url="http://test.com", account_unlink_button=True)
self.assertEquals('{"payload": {"buttons": [{"type": "account_link", "url": "http://test.com"}, '
'{"type": "account_unlink"}], "template_type": "button", "text": "title"}, '
'"type": "template"}', utils.to_json(link))
def test_receipt_template(self):
receipt_id = "order1357"
element = Template.ReceiptElement(title="Oculus Rift",
subtitle="Includes: headset, sensor, remote",
quantity=1,
price=599.00,
currency="USD",
image_url="/assets/riftsq.png"
)
address = Template.ReceiptAddress(street_1="1 Hacker Way",
street_2="",
city="Menlo Park",
postal_code="94025",
state="CA",
country="US")
summary = Template.ReceiptSummary(subtotal=698.99,
shipping_cost=20.00,
total_tax=57.67,
total_cost=626.66)
adjustment = Template.ReceiptAdjustment(name="New Customer Discount", amount=-50)
receipt = Template.Receipt(recipient_name='Peter Chang',
order_number=receipt_id,
currency='USD',
payment_method='Visa 1234',
timestamp="1428444852",
elements=[element],
address=address,
summary=summary,
adjustments=[adjustment])
self.assertEquals('{"payload": {"address": {"city": "Menlo Park", "country": "US", '
'"postal_code": "94025", "state": "CA", "street_1": "1 Hacker Way", "street_2": ""}, '
'"adjustments": [{"amount": -50, "name": "New Customer Discount"}], "currency": "USD", '
'"elements": [{"currency": "USD", "image_url": "/assets/riftsq.png", "price": 599.0, '
'"quantity": 1, "subtitle": "Includes: headset, sensor, remote", "title": "Oculus Rift"}], '
'"order_number": "order1357", "payment_method": "Visa 1234", "recipient_name": '
'"Peter Chang", "summary": {"shipping_cost": 20.0, "subtotal": 698.99, "total_cost": 626.66, '
'"total_tax": 57.67}, "template_type": "receipt", "timestamp": "1428444852"}, '
'"type": "template"}', utils.to_json(receipt))
|
{
"content_hash": "acb79d1d2d4f74f8c6d5d3fff193e3cf",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 120,
"avg_line_length": 57.18803418803419,
"alnum_prop": 0.4917052757435361,
"repo_name": "antikytheraton/pocBot_GBM",
"id": "8124718751f77f92ef8b19c7f734d6d24325bba3",
"size": "6691",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "682"
},
{
"name": "Python",
"bytes": "73498"
}
],
"symlink_target": ""
}
|
__author__ = 'sergey'
__NUMBER__ = 20160422002
def run(manager):
"""
:param manager: Database manager
:type manager: dedupsqlfs.db.sqlite.manager.DbManager|dedupsqlfs.db.mysql.manager.DbManager
:return: bool
"""
if manager.TYPE == "sqlite":
try:
table_sv = manager.getTable("subvolume")
"""
:type table_sv: dedupsqlfs.db.sqlite.table.subvolume.TableSubvolume |
dedupsqlfs.db.mysql.table.subvolume.TableSubvolume
"""
from dedupsqlfs.lib.constants import ROOT_SUBVOLUME_NAME
cur = table_sv.getCursor()
cur.execute("SELECT `hash` FROM `%s`" % table_sv.getName())
svHashes = cur.fetchall()
manager.getLogger().info("Migration #%s: subvolumes to process = %r" % (__NUMBER__, svHashes,))
for item in svHashes:
h = item["hash"].decode()
for tn in ["inode", "xattr", "tree", "link", "inode_option", "inode_hash_block",]:
old_tn = "%s_%s" % (tn, h,)
table = manager.getTable(old_tn, True)
"""
:type table: dedupsqlfs.db.sqlite.table._base.Table |
dedupsqlfs.db.mysql.table_base.Table
"""
table.setName(old_tn)
if table.hasTable():
manager.getLogger().info("Migration #%s: alter table name %r => %r" % (__NUMBER__, old_tn, tn,))
cur = table.getCursor()
cur.execute("ALTER TABLE `%s` RENAME TO `%s`;" % (old_tn, tn,))
table.setName(tn)
except Exception as e:
import traceback
manager.getLogger().error("Migration #%s error: %s" % (__NUMBER__, e,))
manager.getLogger().error("Migration #%s trace:\n%s" % (__NUMBER__, traceback.format_exc(),))
return False
table_opts = manager.getTable("option")
table_opts.getCursor()
mignumber = table_opts.get("migration")
if not mignumber:
table_opts.insert("migration", __NUMBER__)
else:
table_opts.update("migration", __NUMBER__)
table_opts.commit()
return True
|
{
"content_hash": "c67499da1de06a77ed076fba20b15827",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 120,
"avg_line_length": 33.661764705882355,
"alnum_prop": 0.5176933158584535,
"repo_name": "sergey-dryabzhinsky/dedupsqlfs",
"id": "cb0c8b2604eaf47333487a57d41662f64372ad7b",
"size": "2427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dedupsqlfs/db/migrations/m20160422002.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5509796"
},
{
"name": "C++",
"bytes": "33360"
},
{
"name": "Cython",
"bytes": "107356"
},
{
"name": "Python",
"bytes": "1042676"
},
{
"name": "Shell",
"bytes": "1480"
}
],
"symlink_target": ""
}
|
from subprocess import call
import os
call(['l10n', 'link', '1big52YCGIDxU8Tsjh7SPjrSuwuOmuHUkMRWjup1owgA'])
call(['l10n', 'export', '--exporter=android', '--fallback', 'en_US'])
call(['cp', 'strings-en_US.xml', '../mpos-ui/src/main/res/values/strings.xml'])
call(['mv', 'strings-en_US.xml', '../mpos-ui/src/main/res/values-en/strings.xml'])
call(['mv', 'strings-de_DE.xml', '../mpos-ui/src/main/res/values-de/strings.xml'])
call(['mv', 'strings-fr_FR.xml', '../mpos-ui/src/main/res/values-fr/strings.xml'])
call(['mv', 'strings-it_IT.xml', '../mpos-ui/src/main/res/values-it/strings.xml'])
call(['mv', 'strings-pt_PT.xml', '../mpos-ui/src/main/res/values-pt/strings.xml'])
call(['mv', 'strings-es_ES.xml', '../mpos-ui/src/main/res/values-es/strings.xml'])
call(['mv', 'strings-nl_BE.xml', '../mpos-ui/src/main/res/values-nl/strings.xml'])
call(['mv', 'strings-fi_FI.xml', '../mpos-ui/src/main/res/values-fi/strings.xml'])
call(['mv', 'strings-pl_PL.xml', '../mpos-ui/src/main/res/values-pl/strings.xml'])
call(['mv', 'strings-sv_SE.xml', '../mpos-ui/src/main/res/values-sv/strings.xml'])
|
{
"content_hash": "b3d0cf4077f8505786396f46c16b40fa",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 82,
"avg_line_length": 60.666666666666664,
"alnum_prop": 0.6602564102564102,
"repo_name": "payworks/mpos-ui.android.paybutton",
"id": "290c946b7acdc97bc063391e8626171c59fdb028",
"size": "1092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "l10n/l10nhelper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "382149"
},
{
"name": "Python",
"bytes": "1092"
}
],
"symlink_target": ""
}
|
from handlers import host_post_start
from handlers import host_pre_stop
from handlers import _set_send_node_event_on_error_handler
from workflow import WfEvent
from workflow import build_wf_event
def _get_nodes_instances(ctx, node_id):
instances = []
for node in ctx.nodes:
for instance in node.instances:
if instance.node_id == node_id:
instances.append(instance)
return instances
def _get_all_nodes_instances(ctx):
node_instances = set()
for node in ctx.nodes:
for instance in node.instances:
node_instances.add(instance)
return node_instances
def set_state_task(ctx, graph, node_id, state_name, step_id, custom_context):
sequence = _set_state_task(ctx, graph, node_id, state_name, step_id)
if sequence is not None:
sequence.name = step_id
# start = ctx.internal.send_workflow_event(event_type='custom_workflow', message=build_wf_event(WfEvent(step_id, "in")))
# sequence.set_head(start)
# end = ctx.internal.send_workflow_event(event_type='custom_workflow', message=build_wf_event(WfEvent(step_id, "ok")))
# sequence.add(end)
custom_context.tasks[step_id] = sequence
def _set_state_task(ctx, graph, node_id, state_name, step_id):
sequence = None
instances = _get_nodes_instances(ctx, node_id)
instance_count = len(instances)
if instance_count == 1:
instance = instances[0]
sequence = set_state_task_for_instance(graph, node_id, instance, state_name, step_id)
elif instance_count > 1:
fork = ForkjoinWrapper(graph)
for instance in instances:
fork.add(set_state_task_for_instance(graph, node_id, instance, state_name, step_id))
msg = "state {0} on all {1} node instances".format(state_name, node_id)
sequence = forkjoin_sequence(graph, fork, instances[0], msg)
return sequence
def set_state_task_for_instance(graph, node_id, instance, state_name, step_id):
task = TaskSequenceWrapper(graph)
msg = build_wf_event(WfEvent(instance.id, "in", step_id))
task.add(instance.send_event(msg))
task.add(instance.set_state(state_name))
msg = build_wf_event(WfEvent(instance.id, "ok", step_id))
task.add(instance.send_event(msg))
return task
def operation_task(ctx, graph, node_id, operation_fqname, step_id, custom_context):
sequence = _operation_task(ctx, graph, node_id, operation_fqname, step_id, custom_context)
if sequence is not None:
sequence.name = step_id
# start = ctx.internal.send_workflow_event(event_type='custom_workflow', message=build_wf_event(WfEvent(step_id, "in")))
# sequence.set_head(start)
# end = ctx.internal.send_workflow_event(event_type='custom_workflow', message=build_wf_event(WfEvent(step_id, "ok")))
# sequence.add(end)
custom_context.tasks[step_id] = sequence
def _operation_task(ctx, graph, node_id, operation_fqname, step_id, custom_context):
sequence = None
instances = _get_nodes_instances(ctx, node_id)
first_instance = None
instance_count = len(instances)
if instance_count == 1:
instance = instances[0]
first_instance = instance
sequence = operation_task_for_instance(ctx, graph, node_id, instance, operation_fqname, step_id, custom_context)
elif instance_count > 1:
fork = ForkjoinWrapper(graph)
for instance in instances:
instance_task = operation_task_for_instance(ctx, graph, node_id, instance, operation_fqname, step_id, custom_context)
fork.add(instance_task)
msg = "operation {0} on all {1} node instances".format(operation_fqname, node_id)
first_instance = instances[0]
sequence = forkjoin_sequence(graph, fork, first_instance, msg)
return sequence
def count_relationships(instance):
relationship_count = 0
for relationship in instance.relationships:
relationship_count += 1
return relationship_count
def operation_task_for_instance(ctx, graph, node_id, instance, operation_fqname, step_id, custom_context):
sequence = TaskSequenceWrapper(graph)
msg = build_wf_event(WfEvent(instance.id, "in", step_id))
sequence.add(instance.send_event(msg))
relationship_count = count_relationships(instance)
if operation_fqname == 'cloudify.interfaces.lifecycle.start':
sequence.add(instance.execute_operation(operation_fqname))
if _is_host_node(instance):
sequence.add(*host_post_start(ctx, instance))
fork = ForkjoinWrapper(graph)
fork.add(instance.execute_operation('cloudify.interfaces.monitoring.start'))
if relationship_count > 0:
for relationship in instance.relationships:
fork.add(relationship.execute_source_operation('cloudify.interfaces.relationship_lifecycle.establish'))
fork.add(relationship.execute_target_operation('cloudify.interfaces.relationship_lifecycle.establish'))
sequence.add(
instance.send_event("Start monitoring on node '{0}' instance '{1}'".format(node_id, instance.id)),
forkjoin_sequence(graph, fork, instance, "establish")
)
elif operation_fqname == 'cloudify.interfaces.lifecycle.configure':
as_target_relationships = custom_context.relationship_targets.get(instance.id, set())
if relationship_count > 0 or len(as_target_relationships) > 0:
preconfigure_tasks = ForkjoinWrapper(graph)
for relationship in instance.relationships:
preconfigure_tasks.add(relationship.execute_source_operation('cloudify.interfaces.relationship_lifecycle.preconfigure'))
for relationship in as_target_relationships:
preconfigure_tasks.add(relationship.execute_target_operation('cloudify.interfaces.relationship_lifecycle.preconfigure'))
sequence.add(forkjoin_sequence(graph, preconfigure_tasks, instance, "preconf for {0}".format(instance.id)))
sequence.add(instance.execute_operation(operation_fqname))
if relationship_count > 0 or len(as_target_relationships) > 0:
postconfigure_tasks = ForkjoinWrapper(graph)
for relationship in instance.relationships:
postconfigure_tasks.add(relationship.execute_source_operation('cloudify.interfaces.relationship_lifecycle.postconfigure'))
for relationship in as_target_relationships:
task = relationship.execute_target_operation('cloudify.interfaces.relationship_lifecycle.postconfigure')
_set_send_node_event_on_error_handler(task, instance, "Error occurred while postconfiguring node as target for relationship {0} - ignoring...".format(relationship))
postconfigure_tasks.add(task)
msg = "postconf for {0}".format(instance.id)
sequence.add(forkjoin_sequence(graph, postconfigure_tasks, instance, msg))
elif operation_fqname == 'cloudify.interfaces.lifecycle.stop':
if _is_host_node(instance):
sequence.add(*host_pre_stop(instance))
task = instance.execute_operation(operation_fqname)
_set_send_node_event_on_error_handler(task, instance, "Error occurred while stopping node - ignoring...")
sequence.add(task)
# now call unlink onto relations' target
if relationship_count > 0:
fork = ForkjoinWrapper(graph)
for relationship in instance.relationships:
unlink_task_source = relationship.execute_source_operation('cloudify.interfaces.relationship_lifecycle.unlink')
_set_send_node_event_on_error_handler(unlink_task_source, instance, "Error occurred while unlinking node from target {0} - ignoring...".format(relationship.target_id))
fork.add(unlink_task_source)
unlink_task_target = relationship.execute_target_operation('cloudify.interfaces.relationship_lifecycle.unlink')
_set_send_node_event_on_error_handler(unlink_task_target, instance, "Error occurred while unlinking node from target {0} - ignoring...".format(relationship.target_id))
fork.add(unlink_task_target)
sequence.add(forkjoin_sequence(graph, fork, instance, "unlink"))
elif operation_fqname == 'cloudify.interfaces.lifecycle.delete':
task = instance.execute_operation(operation_fqname)
_set_send_node_event_on_error_handler(task, instance, "Error occurred while deleting node - ignoring...")
sequence.add(task)
else:
# the default behavior : just do the job
sequence.add(instance.execute_operation(operation_fqname))
msg = build_wf_event(WfEvent(instance.id, "ok", step_id))
sequence.add(instance.send_event(msg))
return sequence
def forkjoin_sequence(graph, forkjoin_wrapper, instance, label):
sequence = TaskSequenceWrapper(graph)
sequence.add(instance.send_event("forking: {0} instance '{1}'".format(label, instance.id)))
sequence.add(forkjoin_wrapper)
sequence.add(instance.send_event("joining: {0} instance '{1}'".format(label, instance.id)))
return sequence
def link_tasks(graph, source_id, target_id, custom_context):
sources = custom_context.tasks.get(source_id, None)
targets = custom_context.tasks.get(target_id, None)
_link_tasks(graph, sources, targets)
def _link_tasks(graph, sources, targets):
if sources is None:
return
if isinstance(sources, TaskSequenceWrapper) or isinstance(sources, ForkjoinWrapper):
sources = sources.first_tasks
else:
sources = [sources]
if targets is None:
return
if isinstance(targets, TaskSequenceWrapper) or isinstance(targets, ForkjoinWrapper):
targets = targets.last_tasks
else:
targets = [targets]
for source in sources:
for target in targets:
graph.add_dependency(source, target)
def _is_host_node(node_instance):
return 'cloudify.nodes.Compute' in node_instance.node.type_hierarchy
# def _relationship_operations(node_instance, operation):
# tasks_with_targets = _relationship_operations_with_targets(
# node_instance, operation)
# return [task for task, _ in tasks_with_targets]
#
#
# def _relationship_operations_with_targets(node_instance, operation):
# tasks = []
# for relationship in node_instance.relationships:
# tasks += _relationship_operations_with_target(relationship, operation)
# return tasks
#
#
# def _relationship_operations_with_target(relationship, operation):
# return [
# (relationship.execute_source_operation(operation),
# relationship.target_id)
# ]
def generate_native_node_workflows(ctx, graph, custom_context, stage):
native_nodes = custom_context.get_native_nodes(ctx)
# for each native node we build a sequence of operations
native_sequences = {}
for node in native_nodes:
sequence = _generate_native_node_sequence(ctx, graph, node, stage, custom_context)
if sequence is not None:
native_sequences[node.id] = sequence
# we explore the relations between native nodes to orchestrate tasks 'a la' cloudify
for node in native_nodes:
sequence = native_sequences.get(node.id, None)
if sequence is not None:
for relationship in node.relationships:
target_id = relationship.target_id
target_sequence = native_sequences.get(target_id, None)
if target_sequence is not None:
if stage == 'install':
_link_tasks(graph, sequence, target_sequence)
elif stage == 'uninstall':
_link_tasks(graph, target_sequence, sequence)
    # when possible, associate the native sequences with the corresponding delegate workflow step
for node in native_nodes:
sequence = native_sequences.get(node.id, None)
if sequence is not None:
delegate_wf_step = custom_context.delegate_wf_steps.get(node.id, None)
if delegate_wf_step is not None:
# the delegate wf step can be associated to a native sequence
# let's register it in the custom context to make it available for non native tasks links
custom_context.tasks[delegate_wf_step] = sequence
# and remove it from the original map
del custom_context.delegate_wf_steps[node.id]
# this sequence is now associated with a delegate wf step, just remove it from the map
del native_sequences[node.id]
# iterate through remaining delegate_wf_steps
# the remaining ones are those that are not associated with a native sequence
    # at this stage, we are not able to associate these remaining delegate wf steps (we don't have
    # a bridge between the java world model and the python world model (cfy blueprint))
    # so: we fork all remaining sequences and we associate the fork-join to all remaining delegate steps
if len(custom_context.delegate_wf_steps) > 0 and len(native_sequences) > 0:
# let's create a fork join with remaining sequences
fork = ForkjoinWrapper(graph)
for sequence in native_sequences.itervalues():
fork.add(sequence)
for stepId in custom_context.delegate_wf_steps.itervalues():
# we register this fork using the delegate wf step id
# so it can be referenced later to link non native tasks
custom_context.tasks[stepId] = fork
def _generate_native_node_sequence(ctx, graph, node, stage, custom_context):
if stage == 'install':
return _generate_native_node_sequence_install(ctx, graph, node, custom_context)
elif stage == 'uninstall':
return _generate_native_node_sequence_uninstall(ctx, graph, node, custom_context)
else:
return None
def _generate_native_node_sequence_install(ctx, graph, node, custom_context):
sequence = TaskSequenceWrapper(graph)
sequence.add(_set_state_task(ctx, graph, node.id, 'initial', '_{0}_initial'.format(node.id)))
sequence.add(_set_state_task(ctx, graph, node.id, 'creating', '_{0}_creating'.format(node.id)))
sequence.add(_operation_task(ctx, graph, node.id, 'cloudify.interfaces.lifecycle.create', '_create_{0}'.format(node.id), custom_context))
sequence.add(_set_state_task(ctx, graph, node.id, 'created', '_{0}_created'.format(node.id)))
sequence.add(_set_state_task(ctx, graph, node.id, 'configuring', '_{0}_configuring'.format(node.id)))
sequence.add(_operation_task(ctx, graph, node.id, 'cloudify.interfaces.lifecycle.configure', '_configure_{0}'.format(node.id), custom_context))
sequence.add(_set_state_task(ctx, graph, node.id, 'configured', '_{0}_configured'.format(node.id)))
sequence.add(_set_state_task(ctx, graph, node.id, 'starting', '_{0}_starting'.format(node.id)))
sequence.add(_operation_task(ctx, graph, node.id, 'cloudify.interfaces.lifecycle.start', '_start_{0}'.format(node.id), custom_context))
sequence.add(_set_state_task(ctx, graph, node.id, 'started', '_{0}_started'.format(node.id)))
return sequence
def _generate_native_node_sequence_uninstall(ctx, graph, node, custom_context):
sequence = TaskSequenceWrapper(graph)
sequence.add(_set_state_task(ctx, graph, node.id, 'stopping', '_{0}_stopping'.format(node.id)))
sequence.add(_operation_task(ctx, graph, node.id, 'cloudify.interfaces.lifecycle.stop', '_stop_{0}'.format(node.id), custom_context))
sequence.add(_set_state_task(ctx, graph, node.id, 'stopped', '_{0}_stopped'.format(node.id)))
sequence.add(_set_state_task(ctx, graph, node.id, 'deleting', '_{0}_deleting'.format(node.id)))
sequence.add(_operation_task(ctx, graph, node.id, 'cloudify.interfaces.lifecycle.delete', '_delete_{0}'.format(node.id), custom_context))
sequence.add(_set_state_task(ctx, graph, node.id, 'deleted', '_{0}_deleted'.format(node.id)))
return sequence
class ForkjoinWrapper(object):
def __init__(self, graph, name=""):
self.graph = graph
self.first_tasks = []
self.last_tasks = []
self.name = name
def add(self, *tasks):
for element in tasks:
if isinstance(element, ForkjoinWrapper):
self.first_tasks.extend(element.first_tasks)
self.last_tasks.extend(element.last_tasks)
elif isinstance(element, TaskSequenceWrapper):
self.first_tasks.extend(element.first_tasks)
self.last_tasks.extend(element.last_tasks)
else:
self.first_tasks.append(element)
self.last_tasks.append(element)
self.graph.add_task(element)
class TaskSequenceWrapper(object):
def __init__(self, graph, name=""):
self.graph = graph
self.first_tasks = None
self.last_tasks = None
self.name = name
def set_head(self, task):
if self.first_tasks is None:
self.add(task)
else:
self.graph.add_task(task)
for next_task in self.first_tasks:
self.graph.add_dependency(next_task, task)
self.first_tasks = [task]
def add(self, *tasks):
for element in tasks:
tasks_head = None
tasks_queue = None
if isinstance(element, ForkjoinWrapper):
tasks_head = element.first_tasks
tasks_queue = element.last_tasks
elif isinstance(element, TaskSequenceWrapper):
tasks_head = element.first_tasks
tasks_queue = element.last_tasks
else:
tasks_head = [element]
tasks_queue = tasks_head
self.graph.add_task(element)
for task in tasks_head:
if self.last_tasks is not None:
for last_task in self.last_tasks:
self.graph.add_dependency(task, last_task)
if tasks_head is not None:
if self.first_tasks is None:
self.first_tasks = tasks_head
if tasks_queue is not None:
self.last_tasks = tasks_queue
class CustomContext(object):
def __init__(self, ctx):
self.tasks = {}
self.relationship_targets = {}
# a set of nodeId for which wf is customized (designed using a4c)
self.customized_wf_nodes = set()
# a dict of nodeId -> stepId : nodes for which we need to manage the wf ourself
self.delegate_wf_steps = {}
self.__build_relationship_targets(ctx)
'''
    Build a map containing all the relationships that target a given node instance:
    - key is target_id (a node instance id)
    - value is a set of relationships (all relationships that target this node)
'''
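    # Illustrative shape of the resulting map (hypothetical instance ids, added
    # for clarity only):
    #   {'compute_xyz123': {rel_to_compute_1, rel_to_compute_2},
    #    'network_abc456': {rel_to_network_1}}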
def __build_relationship_targets(self, ctx):
node_instances = _get_all_nodes_instances(ctx)
for node_instance in node_instances:
ctx.internal.send_workflow_event(
event_type='other',
message="found an instance of {0} : {1}".format(node_instance.node_id, node_instance.id))
for relationship in node_instance.relationships:
target_relationships = self.relationship_targets.get(relationship.target_id, None)
if target_relationships is None:
target_relationships = set()
self.relationship_targets[relationship.target_id] = target_relationships
ctx.internal.send_workflow_event(
event_type='other',
message="found a relationship that targets {0} : {1}".format(relationship.target_id, relationship))
target_relationships.add(relationship)
def add_customized_wf_node(self, nodeId):
self.customized_wf_nodes.add(nodeId)
    # the native nodes are those for which the workflow is not managed by a4c
def get_native_nodes(self, ctx):
native_nodes = set()
for node in ctx.nodes:
if node.id not in self.customized_wf_nodes:
native_nodes.add(node)
return native_nodes
def register_native_delegate_wf_step(self, nodeId, stepId):
self.delegate_wf_steps[nodeId] = stepId
|
{
"content_hash": "539278a6fa8cd2bdd8f3c50d8c054158",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 183,
"avg_line_length": 49.089371980676326,
"alnum_prop": 0.658564188358018,
"repo_name": "victorkeophila/alien4cloud-cloudify3-provider",
"id": "09238482e1f18a2a1fe28762f57ba37e276011fa",
"size": "20323",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/test/python/workflows/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3432"
},
{
"name": "Java",
"bytes": "374797"
},
{
"name": "Python",
"bytes": "3272862"
},
{
"name": "Shell",
"bytes": "143412"
}
],
"symlink_target": ""
}
|
import inspect
from collections import OrderedDict
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
from astropy.cosmology import Cosmology
from astropy.cosmology.io.mapping import from_mapping, to_mapping
from .base import ToFromDirectTestBase, ToFromTestMixinBase
###############################################################################
class ToFromMappingTestMixin(ToFromTestMixinBase):
"""Tests for a Cosmology[To/From]Format with ``format="mapping"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
def test_to_mapping_default(self, cosmo, to_format):
"""Test default usage of Cosmology -> mapping."""
m = to_format("mapping")
keys = tuple(m.keys())
assert isinstance(m, dict)
# Check equality of all expected items
assert keys[0] == "cosmology"
assert m.pop("cosmology") is cosmo.__class__
assert keys[1] == "name"
assert m.pop("name") == cosmo.name
for i, k in enumerate(cosmo.__parameters__, start=2):
assert keys[i] == k
assert np.array_equal(m.pop(k), getattr(cosmo, k))
assert keys[-1] == "meta"
assert m.pop("meta") == cosmo.meta
# No unexpected items
assert not m
def test_to_mapping_wrong_cls(self, to_format):
"""Test incorrect argument ``cls`` in ``to_mapping()``."""
with pytest.raises(TypeError, match="'cls' must be"):
to_format("mapping", cls=list)
@pytest.mark.parametrize("map_cls", [dict, OrderedDict])
def test_to_mapping_cls(self, to_format, map_cls):
"""Test argument ``cls`` in ``to_mapping()``."""
m = to_format("mapping", cls=map_cls)
assert isinstance(m, map_cls) # test type
def test_to_mapping_cosmology_as_str(self, cosmo_cls, to_format):
"""Test argument ``cosmology_as_str`` in ``to_mapping()``."""
default = to_format("mapping")
# Cosmology is the class
m = to_format("mapping", cosmology_as_str=False)
assert inspect.isclass(m["cosmology"])
assert cosmo_cls is m["cosmology"]
assert m == default # False is the default option
# Cosmology is a string
m = to_format("mapping", cosmology_as_str=True)
assert isinstance(m["cosmology"], str)
assert m["cosmology"] == cosmo_cls.__qualname__ # Correct class
assert tuple(m.keys())[0] == "cosmology" # Stayed at same index
def test_tofrom_mapping_cosmology_as_str(self, cosmo, to_format, from_format):
"""Test roundtrip with ``cosmology_as_str=True``.
The test for the default option (`False`) is in ``test_tofrom_mapping_instance``.
"""
m = to_format("mapping", cosmology_as_str=True)
got = from_format(m, format="mapping")
assert got == cosmo
assert got.meta == cosmo.meta
def test_to_mapping_move_from_meta(self, to_format):
"""Test argument ``move_from_meta`` in ``to_mapping()``."""
default = to_format("mapping")
# Metadata is 'separate' from main mapping
m = to_format("mapping", move_from_meta=False)
assert "meta" in m.keys()
assert not any([k in m for k in m["meta"]]) # Not added to main
assert m == default # False is the default option
# Metadata is mixed into main mapping.
m = to_format("mapping", move_from_meta=True)
assert "meta" not in m.keys()
assert all([k in m for k in default["meta"]]) # All added to main
# The parameters take precedence over the metadata
assert all([np.array_equal(v, m[k]) for k, v in default.items() if k != "meta"])
def test_tofrom_mapping_move_tofrom_meta(self, cosmo, to_format, from_format):
"""Test roundtrip of ``move_from/to_meta`` in ``to/from_mapping()``."""
# Metadata is mixed into main mapping.
m = to_format("mapping", move_from_meta=True)
# (Just adding something to ensure there's 'metadata')
m["mismatching"] = "will error"
# (Tests are different if the last argument is a **kwarg)
if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
got = from_format(m, format="mapping")
assert got.name == cosmo.name
assert "mismatching" not in got.meta
return # don't continue testing
# Reading with mismatching parameters errors...
with pytest.raises(TypeError, match="there are unused parameters"):
from_format(m, format="mapping")
# unless mismatched are moved to meta.
got = from_format(m, format="mapping", move_to_meta=True)
assert got == cosmo # (Doesn't check metadata)
assert got.meta["mismatching"] == "will error"
# -----------------------------------------------------
def test_from_not_mapping(self, cosmo, from_format):
"""Test incorrect map type in ``from_mapping()``."""
with pytest.raises((TypeError, ValueError)):
from_format("NOT A MAP", format="mapping")
def test_from_mapping_default(self, cosmo, to_format, from_format):
"""Test (cosmology -> Mapping) -> cosmology."""
m = to_format("mapping")
# Read from exactly as given.
got = from_format(m, format="mapping")
assert got == cosmo
assert got.meta == cosmo.meta
# Reading auto-identifies 'format'
got = from_format(m)
assert got == cosmo
assert got.meta == cosmo.meta
def test_fromformat_subclass_partial_info_mapping(self, cosmo):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
m = cosmo.to_format("mapping")
# partial information
m.pop("cosmology", None)
m.pop("Tcmb0", None)
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo.__class__.from_format(m, format="mapping")
got2 = Cosmology.from_format(m, format="mapping", cosmology=cosmo.__class__)
got3 = Cosmology.from_format(
m, format="mapping", cosmology=cosmo.__class__.__qualname__
)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo.__class__._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
@pytest.mark.parametrize("format", [True, False, None, "mapping"])
def test_is_equivalent_to_mapping(self, cosmo, to_format, format):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a mapping.
"""
obj = to_format("mapping")
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is (True if format is not False else False)
class TestToFromMapping(ToFromDirectTestBase, ToFromMappingTestMixin):
"""Directly test ``to/from_mapping``."""
def setup_class(self):
self.functions = {"to": to_mapping, "from": from_mapping}
@pytest.mark.skip("N/A")
def test_fromformat_subclass_partial_info_mapping(self):
"""This test does not apply to the direct functions."""
|
{
"content_hash": "e0811141e5234df6bb7947e81e537b4d",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 89,
"avg_line_length": 39.36318407960199,
"alnum_prop": 0.6086956521739131,
"repo_name": "pllim/astropy",
"id": "922e7c435fe1810b41829cf6c9d77f7c73c9884c",
"size": "7986",
"binary": false,
"copies": "3",
"ref": "refs/heads/placeholder",
"path": "astropy/cosmology/io/tests/test_mapping.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040101"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78776"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12404182"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
from __future__ import division, absolute_import
import numpy as np
from copy import deepcopy
from fatiando import utils, gridder
from mesher import ProlateEllipsoid
import prolate_ellipsoid
from numpy.testing import assert_almost_equal
from pytest import raises
# Local-geomagnetic field
F = 30000
inc = 2
dec = -27
gm = 1000 # geometrical factor
area = [-5.*gm, 5.*gm, -5.*gm, 5.*gm]
x, y, z = gridder.scatter(area, 300, z=0.)
axis_ref = gm # reference semi-axis
# Prolate ellipsoids used for testing
model = [ProlateEllipsoid(x=-3*gm, y=-3*gm, z=3*axis_ref,
large_axis=axis_ref,
small_axis=0.6*axis_ref,
strike=78, dip=92, rake=135,
props={'principal susceptibilities': [0.7, 0.7,
0.7],
'susceptibility angles': [90., 47., 13.]}),
ProlateEllipsoid(x=-gm, y=-gm, z=2.4*axis_ref,
large_axis=1.1*axis_ref,
small_axis=0.3*axis_ref,
strike=4, dip=10, rake=5,
props={'principal susceptibilities': [0.2, 0.15,
0.05],
'susceptibility angles': [180, 19, -8.],
'remanent magnetization': [3, -6, 35]}),
ProlateEllipsoid(x=3*gm, y=3*gm, z=4*axis_ref,
large_axis=1.5*axis_ref,
small_axis=0.6*axis_ref,
strike=-58, dip=87, rake=49,
props={'remanent magnetization': [4.7, 39, 0]})]
def test_prolate_ellipsoid_force_prop():
"Test the prolate_ellipsoid code with an imposed physical property"
# forced physical property
pmag = utils.ang2vec(5, 43, -8)
# magnetic field produced by the ellipsoids
# with the forced physical property
bx = prolate_ellipsoid.bx(x, y, z, model,
F, inc, dec, demag=False, pmag=pmag)
by = prolate_ellipsoid.by(x, y, z, model,
F, inc, dec, demag=False, pmag=pmag)
bz = prolate_ellipsoid.bz(x, y, z, model,
F, inc, dec, demag=False, pmag=pmag)
tf = prolate_ellipsoid.tf(x, y, z, model,
F, inc, dec, demag=False, pmag=pmag)
# constant factor
f = 3.71768
# magnetic field produced by the ellipsoids
# with the forced physical property multiplied by the constant factor
bx2 = prolate_ellipsoid.bx(x, y, z, model,
F, inc, dec, demag=False, pmag=f*pmag)
by2 = prolate_ellipsoid.by(x, y, z, model,
F, inc, dec, demag=False, pmag=f*pmag)
bz2 = prolate_ellipsoid.bz(x, y, z, model,
F, inc, dec, demag=False, pmag=f*pmag)
tf2 = prolate_ellipsoid.tf(x, y, z, model,
F, inc, dec, demag=False, pmag=f*pmag)
# the fields must be proportional
assert_almost_equal(bx2, f*bx, decimal=12)
assert_almost_equal(by2, f*by, decimal=12)
assert_almost_equal(bz2, f*bz, decimal=12)
assert_almost_equal(tf2, f*tf, decimal=12)
# pmag not None requires demag not True
raises(AssertionError, prolate_ellipsoid.bx, x, y, z, model,
F, inc, dec, demag=True, pmag=pmag)
raises(AssertionError, prolate_ellipsoid.by, x, y, z, model,
F, inc, dec, demag=True, pmag=pmag)
raises(AssertionError, prolate_ellipsoid.bz, x, y, z, model,
F, inc, dec, demag=True, pmag=pmag)
raises(AssertionError, prolate_ellipsoid.tf, x, y, z, model,
F, inc, dec, demag=True, pmag=pmag)
def test_prolate_ellipsoid_ignore_none():
"Prolate ellipsoid ignores model elements that are None"
# forced physical property
pmag = utils.ang2vec(7, -52, 13)
# copy of the original model
model_none = deepcopy(model)
# force an element of the copy to be None
model_none[1] = None
# magnetic field produced by the original model
# without the removed element
bx = prolate_ellipsoid.bx(x, y, z, [model[0], model[2]],
F, inc, dec, demag=False, pmag=pmag)
by = prolate_ellipsoid.by(x, y, z, [model[0], model[2]],
F, inc, dec, demag=False, pmag=pmag)
bz = prolate_ellipsoid.bz(x, y, z, [model[0], model[2]],
F, inc, dec, demag=False, pmag=pmag)
tf = prolate_ellipsoid.tf(x, y, z, [model[0], model[2]],
F, inc, dec, demag=False, pmag=pmag)
# magnetic field produced by the copy
bx2 = prolate_ellipsoid.bx(x, y, z, model_none,
F, inc, dec, demag=False, pmag=pmag)
by2 = prolate_ellipsoid.by(x, y, z, model_none,
F, inc, dec, demag=False, pmag=pmag)
bz2 = prolate_ellipsoid.bz(x, y, z, model_none,
F, inc, dec, demag=False, pmag=pmag)
tf2 = prolate_ellipsoid.tf(x, y, z, model_none,
F, inc, dec, demag=False, pmag=pmag)
assert_almost_equal(bx2, bx, decimal=15)
assert_almost_equal(by2, by, decimal=15)
assert_almost_equal(bz2, bz, decimal=15)
assert_almost_equal(tf2, tf, decimal=15)
def test_prolate_ellipsoid_missing_prop():
"Self-demagnetization requires specific properties"
# demag=True requires specific properties
raises(AssertionError, prolate_ellipsoid._bx, x, y, z, model[2],
F, inc, dec, demag=True)
raises(AssertionError, prolate_ellipsoid._by, x, y, z, model[2],
F, inc, dec, demag=True)
raises(AssertionError, prolate_ellipsoid._bz, x, y, z, model[2],
F, inc, dec, demag=True)
def test_prolate_ellipsoid_susceptibility_tensor_missing_prop():
"Susceptibility tensor requires specific properties"
suscep1 = model[0].susceptibility_tensor
suscep2 = model[1].susceptibility_tensor
suscep3 = model[2].susceptibility_tensor
assert suscep1 is not None
assert suscep2 is not None
assert suscep3 is None
def test_prolate_ellipsoid_demag_factors_sum():
"The summation of the demagnetizing factors must be equal to one"
n11, n22 = prolate_ellipsoid.demag_factors(model[0])
assert_almost_equal(n11+n22+n22, 1., decimal=15)
n11, n22 = prolate_ellipsoid.demag_factors(model[1])
assert_almost_equal(n11+n22+n22, 1., decimal=15)
n11, n22 = prolate_ellipsoid.demag_factors(model[2])
assert_almost_equal(n11+n22+n22, 1., decimal=15)
def test_prolate_ellipsoid_demag_factors_signal_order():
"Demagnetizing factors must be all positive and ordered"
n11, n22 = prolate_ellipsoid.demag_factors(model[0])
assert (n11 > 0) and (n22 > 0)
assert n22 > n11
n11, n22 = prolate_ellipsoid.demag_factors(model[1])
assert (n11 > 0) and (n22 > 0)
assert n22 > n11
n11, n22 = prolate_ellipsoid.demag_factors(model[2])
assert (n11 > 0) and (n22 > 0)
assert n22 > n11
def test_prolate_ellipsoid_self_demagnetization():
"Self-demagnetization decreases the magnetization intensity"
mag_with_demag = prolate_ellipsoid.magnetization(model[1],
F, inc, dec,
demag=True)
mag_without_demag = prolate_ellipsoid.magnetization(model[1],
F, inc, dec,
demag=False)
mag_with_demag_norm = np.linalg.norm(mag_with_demag, ord=2)
mag_without_demag_norm = np.linalg.norm(mag_without_demag, ord=2)
assert mag_with_demag_norm < mag_without_demag_norm
def test_prolate_ellipsoid_neglecting_self_demagnetization():
"The error in magnetization by negleting self-demagnetization is bounded"
# susceptibility tensor
k1, k2, k3 = model[0].props['principal susceptibilities']
strike, dip, rake = model[0].props['susceptibility angles']
# demagnetizing factors
n11, n22 = prolate_ellipsoid.demag_factors(model[0])
# maximum relative error in the resulting magnetization
max_error = k3*n22
# magnetizations calculated with and without self-demagnetization
mag_with_demag = prolate_ellipsoid.magnetization(model[0],
F, inc, dec,
demag=True)
mag_without_demag = prolate_ellipsoid.magnetization(model[0],
F, inc, dec,
demag=False)
# difference in magnetization
mag_diff = mag_with_demag - mag_without_demag
# computed norms
mag_with_demag_norm = np.linalg.norm(mag_with_demag, ord=2)
mag_diff_norm = np.linalg.norm(mag_diff, ord=2)
# computed error
computed_error = mag_diff_norm/mag_with_demag_norm
assert computed_error <= max_error
def test_prolate_ellipsoid_depolarization_tensor():
"The depolarization tensor must be symmetric"
ellipsoid = model[1]
x1, x2, x3 = prolate_ellipsoid.x1x2x3(x, y, z, ellipsoid)
lamb = prolate_ellipsoid._lamb(x1, x2, x3, ellipsoid)
denominator = prolate_ellipsoid._dlamb_aux(x1, x2, x3, ellipsoid, lamb)
dlamb_dx = prolate_ellipsoid._dlamb(x1, x2, x3, ellipsoid, lamb,
denominator, deriv='x')
dlamb_dy = prolate_ellipsoid._dlamb(x1, x2, x3, ellipsoid, lamb,
denominator, deriv='y')
dlamb_dz = prolate_ellipsoid._dlamb(x1, x2, x3, ellipsoid, lamb,
denominator, deriv='z')
h1 = prolate_ellipsoid._hv(ellipsoid, lamb, v='x')
h2 = prolate_ellipsoid._hv(ellipsoid, lamb, v='y')
h3 = prolate_ellipsoid._hv(ellipsoid, lamb, v='z')
g1 = prolate_ellipsoid._gv(ellipsoid, lamb, v='x')
g2 = prolate_ellipsoid._gv(ellipsoid, lamb, v='y')
g3 = prolate_ellipsoid._gv(ellipsoid, lamb, v='z')
a = ellipsoid.large_axis
b = ellipsoid.small_axis
cte = -0.5*a*b*b
    # elements of the depolarization tensor outside the ellipsoid
nxx = cte*(dlamb_dx*h1*x1 + g1)
nyy = cte*(dlamb_dy*h2*x2 + g2)
nzz = cte*(dlamb_dz*h3*x3 + g3)
nxy = cte*(dlamb_dx*h2*x2)
nyx = cte*(dlamb_dy*h1*x1)
nxz = cte*(dlamb_dx*h3*x3)
nzx = cte*(dlamb_dz*h1*x1)
nyz = cte*(dlamb_dy*h3*x3)
nzy = cte*(dlamb_dz*h2*x2)
trace = nxx+nyy+nzz
    # the trace must be zero
    assert_almost_equal(trace, np.zeros_like(nxx), decimal=3)
    # the depolarization tensor is symmetric
assert_almost_equal(nxy, nyx, decimal=3)
assert_almost_equal(nxz, nzx, decimal=3)
assert_almost_equal(nyz, nzy, decimal=3)
def test_prolate_ellipsoid_isotropic_susceptibility():
"Isostropic susceptibility must be proportional to identity"
k1, k2, k3 = model[0].props['principal susceptibilities']
strike, dip, rake = model[0].props['susceptibility angles']
suscep = model[0].susceptibility_tensor
assert np.allclose(suscep, k1*np.identity(3))
def test_confocal_prolate_ellipsoids():
"Confocal bodies with properly scaled suscep produce the same field"
# Reference ellipsoid
a, b, = 1000., 400. # semi-axes
chi = 1.2 # reference susceptibility
ellipsoid = ProlateEllipsoid(0., 0., 1500., a, b, 45., 10., -30.,
{'principal susceptibilities': [chi,
chi,
chi],
'susceptibility angles': [0., 0., 0.]})
# Intensity of the local-geomagnetic field (in nT)
B0 = 23500.
# Direction parallel to the semi-axis a
_, inc, dec = utils.vec2ang(ellipsoid.transf_matrix.T[0])
# Magnetic moment of the reference ellipsoid
volume = ellipsoid.volume
mag = prolate_ellipsoid.magnetization(ellipsoid, B0,
inc, dec, demag=True)
moment = volume*mag
# Confocal ellipsoid
u = 2.0e6
a_confocal = np.sqrt(a*a + u)
b_confocal = np.sqrt(b*b + u)
xc = ellipsoid.x
yc = ellipsoid.y
zc = ellipsoid.z
strike = ellipsoid.strike
dip = ellipsoid.dip
rake = ellipsoid.rake
confocal_ellipsoid = ProlateEllipsoid(xc, yc, zc,
a_confocal, b_confocal,
strike, dip, rake,
{'susceptibility angles':
[0., 0., 0.]})
n11, n22 = prolate_ellipsoid.demag_factors(confocal_ellipsoid)
H0 = B0/(4*np.pi*100)
volume_confocal = confocal_ellipsoid.volume
# Equivalent susceptibility
moment_norm = np.sqrt(np.sum(moment*moment))
chi_confocal = moment_norm/(volume_confocal*H0 - n11*moment_norm)
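    # (Added note, sketching the algebra behind the line above: with the field
    #  along a principal axis, M = chi*H0/(1 + chi*n11), so equating the moment
    #  m = V*M of the two bodies gives chi = m/(V*H0 - n11*m).)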
confocal_ellipsoid.addprop('principal susceptibilities',
[chi_confocal, chi_confocal, chi_confocal])
# Magnetic moment of the confocal ellipsoid
mag_confocal = prolate_ellipsoid.magnetization(confocal_ellipsoid, B0,
inc, dec, demag=True)
moment_confocal = volume_confocal*mag_confocal
# Total-field anomalies
tf = prolate_ellipsoid.tf(x, y, z, [ellipsoid], B0, inc, dec)
tf_confocal = prolate_ellipsoid.tf(x, y, z, [confocal_ellipsoid],
B0, inc, dec)
# Comparison between the moments and total-field anomalies
assert_almost_equal(moment, moment_confocal, decimal=5)
assert_almost_equal(tf, tf_confocal, decimal=12)
|
{
"content_hash": "6695edec3cf52abb0905f01c7991b52e",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 77,
"avg_line_length": 40.89085545722714,
"alnum_prop": 0.5764680421295628,
"repo_name": "pinga-lab/magnetic-ellipsoid",
"id": "5b313012483bab126a7e40a5fae380a988fcfea1",
"size": "13862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/test_prolate_ellipsoid.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "10685961"
},
{
"name": "Makefile",
"bytes": "2875"
},
{
"name": "Python",
"bytes": "172917"
},
{
"name": "TeX",
"bytes": "330436"
}
],
"symlink_target": ""
}
|
"""Tests for aodh/storage/impl_mongodb.py
.. note::
In order to run the tests against another MongoDB server set the
environment variable aodh_TEST_MONGODB_URL to point to a MongoDB
server before running the tests.
"""
from aodh.alarm.storage import impl_mongodb as impl_mongodb
from aodh.tests import base as test_base
from aodh.tests import db as tests_db
@tests_db.run_with('mongodb')
class MongoDBConnection(tests_db.TestBase,
tests_db.MixinTestsWithBackendScenarios):
def test_connection_pooling(self):
test_conn = impl_mongodb.Connection(self.db_manager.url)
self.assertEqual(self.alarm_conn.conn, test_conn.conn)
def test_replica_set(self):
url = self.db_manager._url + '?replicaSet=foobar'
conn = impl_mongodb.Connection(url)
self.assertTrue(conn.conn)
@tests_db.run_with('mongodb')
class IndexTest(tests_db.TestBase,
tests_db.MixinTestsWithBackendScenarios):
def _test_ttl_index_absent(self, conn, coll_name, ttl_opt):
# create a fake index and check it is deleted
coll = getattr(conn.db, coll_name)
index_name = '%s_ttl' % coll_name
self.CONF.set_override(ttl_opt, -1, group='database')
conn.upgrade()
self.assertNotIn(index_name, coll.index_information())
self.CONF.set_override(ttl_opt, 456789, group='database')
conn.upgrade()
self.assertEqual(456789,
coll.index_information()
[index_name]['expireAfterSeconds'])
def test_alarm_history_ttl_index_absent(self):
self._test_ttl_index_absent(self.alarm_conn, 'alarm_history',
'alarm_history_time_to_live')
def _test_ttl_index_present(self, conn, coll_name, ttl_opt):
coll = getattr(conn.db, coll_name)
self.CONF.set_override(ttl_opt, 456789, group='database')
conn.upgrade()
index_name = '%s_ttl' % coll_name
self.assertEqual(456789,
coll.index_information()
[index_name]['expireAfterSeconds'])
self.CONF.set_override(ttl_opt, -1, group='database')
conn.upgrade()
self.assertNotIn(index_name, coll.index_information())
def test_alarm_history_ttl_index_present(self):
self._test_ttl_index_present(self.alarm_conn, 'alarm_history',
'alarm_history_time_to_live')
class CapabilitiesTest(test_base.BaseTestCase):
def test_alarm_capabilities(self):
expected_capabilities = {
'alarms': {'query': {'simple': True,
'complex': True},
'history': {'query': {'simple': True,
'complex': True}}},
}
actual_capabilities = impl_mongodb.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
|
{
"content_hash": "eaf012fb773e7e25a3fb4b18085ed354",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 72,
"avg_line_length": 38.07692307692308,
"alnum_prop": 0.6097643097643097,
"repo_name": "chungg/aodh",
"id": "8dc61de94ec2d3f935a88aab14c1badb8defa804",
"size": "3571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aodh/tests/storage/test_impl_mongodb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "697346"
},
{
"name": "Shell",
"bytes": "5196"
}
],
"symlink_target": ""
}
|
"""
Implementation of logging for PyWPS-4
"""
from pywps import configuration
from pywps.exceptions import NoApplicableCode
import sqlite3
import datetime
_CONNECTION = None
def log_request(uuid, request):
"""Write OGC WPS request (only the necessary parts) to database logging
system
"""
conn = get_connection()
insert = """
INSERT INTO
pywps_requests (uuid, operation, version, time_start, identifier)
VALUES
('{uuid}', '{operation}', '{version}', '{time_start}', '{identifier}')
""".format(
uuid=uuid,
operation=request.operation,
version=request.version,
time_start=datetime.datetime.now().isoformat(),
identifier=_get_identifier(request)
)
cur = conn.cursor()
cur.execute(insert)
conn.commit()
close_connection()
def update_response(uuid, response, close=False):
"""Writes response to database
"""
conn = get_connection()
message = 'Null'
status_percentage = 'Null'
status = 'Null'
if hasattr(response, 'message'):
message = "'%s'" % response.message
if hasattr(response, 'status_percentage'):
status_percentage = response.status_percentage
if hasattr(response, 'status'):
status = "'%s'" % response.status
update = """
UPDATE
pywps_requests
SET
time_end = '{time_end}', message={message},
percent_done = {percent_done}, status={status}
WHERE
uuid = '{uuid}'
""".format(
time_end=datetime.datetime.now().isoformat(),
message=message,
percent_done=status_percentage,
status=status,
uuid=uuid
)
cur = conn.cursor()
cur.execute(update)
conn.commit()
close_connection()
def _get_identifier(request):
"""Get operation identifier
"""
if request.operation == 'execute':
return request.identifier
elif request.operation == 'describeprocess':
if request.identifiers:
return ','.join(request.identifiers)
else:
return 'Null'
else:
return 'NULL'
def get_connection():
"""Get Connection for database
"""
global _CONNECTION
if _CONNECTION:
return _CONNECTION
database = configuration.get_config_value('server', 'logdatabase')
if not database:
database = ':memory:'
connection = sqlite3.connect(database)
if check_db_table(connection):
if check_db_columns(connection):
_CONNECTION = connection
else:
raise NoApplicableCode("""
Columns in the table 'pywps_requests' in database '%s' are in
conflict
""" % database)
else:
createsql = """
CREATE TABLE pywps_requests(
uuid VARCHAR(255) not null primary key,
operation varchar(30) not null,
version varchar(5) not null,
time_start text not null,
time_end text,
identifier text,
message text,
percent_done float,
status varchar(30)
)
"""
_CONNECTION = sqlite3.connect(database, check_same_thread=False)
cursor = _CONNECTION.cursor()
cursor.execute(createsql)
_CONNECTION.commit()
return _CONNECTION
def check_db_table(connection):
"""Check for existing pywps_requests table in the datase
:return: boolean pywps_requests table is in database
"""
cursor = connection.cursor()
cursor.execute("""
SELECT
name
FROM
sqlite_master
WHERE
name='pywps_requests'
""")
table = cursor.fetchone()
if table:
return True
else:
return False
def check_db_columns(connection):
"""Simple check for existing columns in given database
    we will make just a simple check, this is not django
:return: all needed columns found
:rtype: boolean
"""
cur = connection.cursor()
cur.execute("""PRAGMA table_info('pywps_requests')""")
metas = cur.fetchall()
columns = []
for column in metas:
columns.append(column[1])
needed_columns = ['uuid', 'operation', 'version', 'time_start',
'time_end', 'identifier', 'message', 'percent_done',
'status']
needed_columns.sort()
columns.sort()
if columns == needed_columns:
return True
else:
return False
def close_connection():
global _CONNECTION
if _CONNECTION:
_CONNECTION.close()
_CONNECTION = None
|
{
"content_hash": "d3912e0c412f90cf07b383fc7a637b90",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 82,
"avg_line_length": 25.231182795698924,
"alnum_prop": 0.5785212017898999,
"repo_name": "ricardogsilva/PyWPS",
"id": "96a2b831c7f14bb2e171b15f7cb5a82447d7036a",
"size": "4693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pywps/dblog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "219754"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from geocoder.base import Base
class IpinfoIo(Base):
"""
API Reference
-------------
https://ipinfo.io
"""
provider = 'ipinfoio'
method = 'geocode'
def __init__(self, location='', **kwargs):
self.location = location
self.url = 'http://ipinfo.io/{0}/json'.format(self.location)
self._initialize(**kwargs)
self._ipinfoio_catch_errors()
def _ipinfoio_catch_errors(self):
error = self.content.get('error')
if error:
code = self.content.get('code')
self.error = code
def _exceptions(self):
subdivisions = self.content.get('subdivisions')
if subdivisions:
self.content['subdivision'] = subdivisions[0]
# Grab all names in [en] and place them in self.parse
for key, value in self.content.items():
if isinstance(value, dict):
for minor_key, minor_value in value.items():
if minor_key == 'names':
self.parse[key] = minor_value['en']
@property
def lat(self):
return self.parse.get('loc').split(',')[0]
@property
def lng(self):
return self.parse.get('loc').split(',')[1]
@property
def address(self):
if self.city:
return '{0}, {1}, {2}'.format(self.city, self.state, self.country)
elif self.state:
return '{0}, {1}'.format(self.state, self.country)
elif self.country:
return '{0}'.format(self.country)
else:
return ''
@property
def postal(self):
return self.parse.get('postal')
@property
def city(self):
return self.parse.get('city')
@property
def state(self):
return self.parse.get('region')
@property
def country(self):
return self.parse.get('country')
@property
def hostname(self):
return self.parse.get('hostname')
@property
def ip(self):
return self.parse.get('ip')
@property
def org(self):
return self.parse.get('org')
if __name__ == '__main__':
g = IpinfoIo('')
g.debug()
|
{
"content_hash": "13eaa0c368896a79d01653f6a4bf52e5",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 78,
"avg_line_length": 25.488372093023255,
"alnum_prop": 0.5483576642335767,
"repo_name": "epyatopal/geocoder-1",
"id": "65fa4e5e83083ec328d2b2afbda0a702f575997e",
"size": "2226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geocoder/ipinfoio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "422"
},
{
"name": "Python",
"bytes": "126366"
}
],
"symlink_target": ""
}
|
import urlparse
# project
from util import headers
from checks import AgentCheck
# 3rd party
import requests
class Apache(AgentCheck):
"""Tracks basic connection/requests/workers metrics
See http://httpd.apache.org/docs/2.2/mod/mod_status.html for more details
"""
GAUGES = {
'IdleWorkers': 'apache.performance.idle_workers',
'BusyWorkers': 'apache.performance.busy_workers',
'CPULoad': 'apache.performance.cpu_load',
'Uptime': 'apache.performance.uptime',
'Total kBytes': 'apache.net.bytes',
'Total Accesses': 'apache.net.hits',
}
RATES = {
'Total kBytes': 'apache.net.bytes_per_s',
'Total Accesses': 'apache.net.request_per_s'
}
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.assumed_url = {}
def check(self, instance):
if 'apache_status_url' not in instance:
raise Exception("Missing 'apache_status_url' in Apache config")
url = self.assumed_url.get(instance['apache_status_url'], instance['apache_status_url'])
tags = instance.get('tags', [])
auth = None
if 'apache_user' in instance and 'apache_password' in instance:
auth = (instance['apache_user'], instance['apache_password'])
# Submit a service check for status page availability.
parsed_url = urlparse.urlparse(url)
apache_host = parsed_url.hostname
apache_port = parsed_url.port or 80
service_check_name = 'apache.can_connect'
service_check_tags = ['host:%s' % apache_host, 'port:%s' % apache_port]
try:
r = requests.get(url, auth=auth, headers=headers(self.agentConfig))
r.raise_for_status()
except Exception:
self.service_check(service_check_name, AgentCheck.CRITICAL,
tags=service_check_tags)
raise
else:
self.service_check(service_check_name, AgentCheck.OK,
tags=service_check_tags)
response = r.content
metric_count = 0
# Loop through and extract the numerical values
for line in response.splitlines():
values = line.split(': ')
if len(values) == 2: # match
metric, value = values
try:
value = float(value)
except ValueError:
continue
# Special case: kBytes => bytes
if metric == 'Total kBytes':
value = value * 1024
# Send metric as a gauge, if applicable
if metric in self.GAUGES:
metric_count += 1
metric_name = self.GAUGES[metric]
self.gauge(metric_name, value, tags=tags)
# Send metric as a rate, if applicable
if metric in self.RATES:
metric_count += 1
metric_name = self.RATES[metric]
self.rate(metric_name, value, tags=tags)
if metric_count == 0:
if self.assumed_url.get(instance['apache_status_url'], None) is None and url[-5:] != '?auto':
                self.assumed_url[instance['apache_status_url']] = '%s?auto' % url
                self.warning("Assuming the URL was not correct. Trying to add the ?auto suffix to the URL")
self.check(instance)
else:
raise Exception("No metrics were fetched for this instance. Make sure that %s is the proper url." % instance['apache_status_url'])
|
{
"content_hash": "5bb691704414ac1f79bd9d93340f0b22",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 146,
"avg_line_length": 37.97938144329897,
"alnum_prop": 0.5667752442996743,
"repo_name": "JohnLZeller/dd-agent",
"id": "9dece7b8e9d510e2215953165fde867df751b97a",
"size": "3693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checks.d/apache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2717"
},
{
"name": "Go",
"bytes": "917"
},
{
"name": "HTML",
"bytes": "8758"
},
{
"name": "Nginx",
"bytes": "3404"
},
{
"name": "PowerShell",
"bytes": "2665"
},
{
"name": "Python",
"bytes": "1454456"
},
{
"name": "Ruby",
"bytes": "57718"
},
{
"name": "Shell",
"bytes": "38669"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
}
|
"""
Determine whether an integer is a palindrome. Do this without extra space.
"""
__author__ = 'Danyang'
class Solution:
def isPalindrome(self, x):
"""
        Algorithm: work on the integer directly, comparing the least significant
        digit (lsb) with the most significant digit (msb).
        No extra space is used.
        Converting the integer to a string would violate the no-extra-space restriction.
:param x: int
:return: boolean
"""
if x < 0:
return False
# find order of magnitude
div = 1
while x/div >= 10:
            div *= 10 # without modifying x
while x > 0:
msb = x/div
lsb = x%10
if msb != lsb:
return False
# shrink
x %= div
x /= 10
div /= 100
return True
if __name__ == "__main__":
Solution().isPalindrome(2147483647)
|
{
"content_hash": "243de43c745a21b3ba39277e77593d0e",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 107,
"avg_line_length": 21.595238095238095,
"alnum_prop": 0.4696802646085998,
"repo_name": "algorhythms/LeetCode",
"id": "d871ba7b55172b6a30aa9f842faae5d31e80e8f4",
"size": "907",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "009 Palindrome Number.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1444167"
}
],
"symlink_target": ""
}
|
from ..utils import RequestHandler
import logging
class _PrometheusCollector:
def __init__(self, server):
self.worker = server
self.logger = logging.getLogger("distributed.dask_worker")
self.crick_available = True
try:
import crick # noqa: F401
except ImportError:
self.crick_available = False
self.logger.info(
"Not all prometheus metrics available are exported. Digest-based metrics require crick to be installed"
)
def collect(self):
from prometheus_client.core import GaugeMetricFamily
tasks = GaugeMetricFamily(
"dask_worker_tasks", "Number of tasks at worker.", labels=["state"]
)
tasks.add_metric(["stored"], len(self.worker.data))
tasks.add_metric(["executing"], len(self.worker.executing))
tasks.add_metric(["ready"], len(self.worker.ready))
tasks.add_metric(["waiting"], len(self.worker.waiting_for_data))
tasks.add_metric(["serving"], len(self.worker._comms))
yield tasks
yield GaugeMetricFamily(
"dask_worker_connections",
"Number of task connections to other workers.",
value=len(self.worker.in_flight_workers),
)
yield GaugeMetricFamily(
"dask_worker_threads",
"Number of worker threads.",
value=self.worker.nthreads,
)
yield GaugeMetricFamily(
"dask_worker_latency_seconds",
"Latency of worker connection.",
value=self.worker.latency,
)
# all metrics using digests require crick to be installed
# the following metrics will export NaN, if the corresponding digests are None
if self.crick_available:
yield GaugeMetricFamily(
"dask_worker_tick_duration_median_seconds",
"Median tick duration at worker.",
value=self.worker.digests["tick-duration"].components[1].quantile(50),
)
yield GaugeMetricFamily(
"dask_worker_task_duration_median_seconds",
"Median task runtime at worker.",
value=self.worker.digests["task-duration"].components[1].quantile(50),
)
yield GaugeMetricFamily(
"dask_worker_transfer_bandwidth_median_bytes",
"Bandwidth for transfer at worker in Bytes.",
value=self.worker.digests["transfer-bandwidth"]
.components[1]
.quantile(50),
)
class PrometheusHandler(RequestHandler):
_initialized = False
def __init__(self, *args, **kwargs):
import prometheus_client
super(PrometheusHandler, self).__init__(*args, **kwargs)
if PrometheusHandler._initialized:
return
prometheus_client.REGISTRY.register(_PrometheusCollector(self.server))
PrometheusHandler._initialized = True
def get(self):
import prometheus_client
self.write(prometheus_client.generate_latest())
self.set_header("Content-Type", "text/plain; version=0.0.4")
routes = [
(r"metrics", PrometheusHandler, {}),
]
|
{
"content_hash": "359684318e78b75741b7667a171da24d",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 119,
"avg_line_length": 33.01020408163265,
"alnum_prop": 0.598145285935085,
"repo_name": "blaze/distributed",
"id": "a60de3a6b64856610a6eaf5b31415dd340b2b4d0",
"size": "3235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distributed/http/worker/prometheus.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "Python",
"bytes": "511624"
},
{
"name": "Shell",
"bytes": "1120"
}
],
"symlink_target": ""
}
|
'''
Salt-specific interface for calling Salt Cloud directly
'''
# Import python libs
from __future__ import absolute_import
import os
import logging
import copy
# Import salt libs
try:
import salt.cloud
HAS_SALTCLOUD = True
except ImportError:
HAS_SALTCLOUD = False
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
log = logging.getLogger(__name__)
__func_alias__ = {
'profile_': 'profile'
}
def __virtual__():
'''
    Only works on POSIX-like systems
'''
if HAS_SALTCLOUD:
return True
return (False, 'The cloud execution module cannot be loaded: only available on non-Windows systems.')
def _get_client():
'''
Return a cloud client
'''
client = salt.cloud.CloudClient(
os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud'),
pillars=copy.deepcopy(__pillar__.get('cloud', {}))
)
return client
def list_sizes(provider='all'):
'''
List cloud provider sizes for the given providers
CLI Example:
.. code-block:: bash
salt '*' cloud.list_sizes my-gce-config
'''
client = _get_client()
sizes = client.list_sizes(provider)
return sizes
def list_images(provider='all'):
'''
List cloud provider images for the given providers
CLI Example:
.. code-block:: bash
salt '*' cloud.list_images my-gce-config
'''
client = _get_client()
images = client.list_images(provider)
return images
def list_locations(provider='all'):
'''
List cloud provider locations for the given providers
CLI Example:
.. code-block:: bash
salt '*' cloud.list_locations my-gce-config
'''
client = _get_client()
locations = client.list_locations(provider)
return locations
def query(query_type='list_nodes'):
'''
List cloud provider data for all providers
CLI Examples:
.. code-block:: bash
salt '*' cloud.query
salt '*' cloud.query list_nodes_full
salt '*' cloud.query list_nodes_select
'''
client = _get_client()
info = client.query(query_type)
return info
def full_query(query_type='list_nodes_full'):
'''
List all available cloud provider data
CLI Example:
.. code-block:: bash
salt '*' cloud.full_query
'''
return query(query_type=query_type)
def select_query(query_type='list_nodes_select'):
'''
List selected nodes
CLI Example:
.. code-block:: bash
salt '*' cloud.select_query
'''
return query(query_type=query_type)
def has_instance(name, provider=None):
'''
Return true if the instance is found on a provider
CLI Example:
.. code-block:: bash
salt '*' cloud.has_instance myinstance
'''
data = get_instance(name, provider)
if data is None:
return False
return True
def get_instance(name, provider=None):
'''
Return details on an instance.
Similar to the cloud action show_instance
but returns only the instance details.
CLI Example:
.. code-block:: bash
salt '*' cloud.get_instance myinstance
SLS Example:
.. code-block:: bash
{{ salt['cloud.get_instance']('myinstance')['mac_address'] }}
'''
data = action(fun='show_instance', names=[name], provider=provider)
info = salt.utils.cloud.simple_types_filter(data)
try:
# get the first: [alias][driver][vm_name]
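        # Illustrative only: info is assumed to be nested as
        #   {'my-ec2-config': {'ec2': {'myinstance': {...instance details...}}}}
        # so three nested next()/itervalues() calls unwrap it to the details dict.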
info = next(six.itervalues(next(six.itervalues(next(six.itervalues(info))))))
except AttributeError:
return None
return info
def profile_(profile, names, vm_overrides=None, **kwargs):
'''
Spin up an instance using Salt Cloud
CLI Example:
.. code-block:: bash
salt '*' cloud.profile my-gce-config myinstance
'''
client = _get_client()
info = client.profile(profile, names, vm_overrides=vm_overrides, **kwargs)
return info
def destroy(names):
'''
Destroy the named VM(s)
CLI Example:
.. code-block:: bash
salt '*' cloud.destroy myinstance
'''
client = _get_client()
info = client.destroy(names)
return info
def action(
fun=None,
cloudmap=None,
names=None,
provider=None,
instance=None,
**kwargs):
'''
Execute a single action on the given provider/instance
CLI Example:
.. code-block:: bash
salt '*' cloud.action start instance=myinstance
salt '*' cloud.action stop instance=myinstance
salt '*' cloud.action show_image provider=my-ec2-config image=ami-1624987f
'''
client = _get_client()
info = client.action(fun, cloudmap, names, provider, instance, kwargs)
return info
def create(provider, names, **kwargs):
'''
Create an instance using Salt Cloud
CLI Example:
.. code-block:: bash
salt minionname cloud.create my-ec2-config myinstance image=ami-1624987f size='t1.micro' ssh_username=ec2-user securitygroup=default delvol_on_destroy=True
'''
client = _get_client()
info = client.create(provider, names, **kwargs)
return info
def volume_list(provider):
'''
List block storage volumes
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_list my-nova
'''
client = _get_client()
info = client.extra_action(action='volume_list', provider=provider, names='name')
return info['name']
def volume_delete(provider, names, **kwargs):
'''
Delete volume
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_delete my-nova myblock
'''
client = _get_client()
info = client.extra_action(provider=provider, names=names, action='volume_delete', **kwargs)
return info
def volume_create(provider, names, **kwargs):
'''
Create volume
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_create my-nova myblock size=100 voltype=SSD
'''
client = _get_client()
info = client.extra_action(action='volume_create', names=names, provider=provider, **kwargs)
return info
def volume_attach(provider, names, **kwargs):
'''
Attach volume to a server
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_attach my-nova myblock server_name=myserver device='/dev/xvdf'
'''
client = _get_client()
info = client.extra_action(provider=provider, names=names, action='volume_attach', **kwargs)
return info
def volume_detach(provider, names, **kwargs):
'''
Detach volume from a server
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_detach my-nova myblock server_name=myserver
'''
client = _get_client()
info = client.extra_action(provider=provider, names=names, action='volume_detach', **kwargs)
return info
def network_list(provider):
'''
List private networks
CLI Example:
.. code-block:: bash
salt minionname cloud.network_list my-nova
'''
client = _get_client()
return client.extra_action(action='network_list', provider=provider, names='names')
def network_create(provider, names, **kwargs):
'''
Create private network
CLI Example:
.. code-block:: bash
salt minionname cloud.network_create my-nova names=['salt'] cidr='192.168.100.0/24'
'''
client = _get_client()
return client.extra_action(provider=provider, names=names, action='network_create', **kwargs)
def virtual_interface_list(provider, names, **kwargs):
'''
List virtual interfaces on a server
CLI Example:
.. code-block:: bash
salt minionname cloud.virtual_interface_list my-nova names=['salt-master']
'''
client = _get_client()
return client.extra_action(provider=provider, names=names, action='virtual_interface_list', **kwargs)
def virtual_interface_create(provider, names, **kwargs):
'''
Attach private interfaces to a server
CLI Example:
.. code-block:: bash
salt minionname cloud.virtual_interface_create my-nova names=['salt-master'] net_name='salt'
'''
client = _get_client()
return client.extra_action(provider=provider, names=names, action='virtual_interface_create', **kwargs)
|
{
"content_hash": "559600db33609261cff76c4fed9de217",
"timestamp": "",
"source": "github",
"line_count": 389,
"max_line_length": 163,
"avg_line_length": 21.282776349614394,
"alnum_prop": 0.6343761323831381,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "f4cd488631105c91776c29fa1b67effdf3ef1673",
"size": "8303",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.2/salt/modules/cloud.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
}
|
import csv
from ibms.db_funcs import csvload, saverow
from sfm.models import CostCentre, SFMMetric, MeasurementValue, Quarter
COLS_SFM_METRICS = 5
COLS_COSTCENTRES = 3
def import_to_sfmmetrics(fileName, fy):
reader, file, fileName = csvload(fileName)
try:
i = 1
for row in reader:
metric, created = SFMMetric.objects.get_or_create(
fy=fy,
servicePriorityNo=row[1],
metricID=row[2],
)
metric.region = row[0]
metric.descriptor = row[3]
metric.example = row[4]
metric.save()
i += 1
except SFMMetric.DoesNotExist:
raise Exception('Row {}:{}\nPlease import servicePriorityNo into IBMData before proceeding, otherwise database integrity will be compromised.'.format(i, row[0]))
return
def import_to_costcentres(fileName):
reader, file, fileName = csvload(fileName)
try:
i = 1
for row in reader:
if CostCentre.objects.filter(costCentre=row[0]):
cc = CostCentre.objects.get(costCentre=row[0])
cc.name = row[1]
cc.region = row[2]
cc.save()
else:
CostCentre.objects.create(
costCentre=row[0],
name=row[1],
region=row[2],
)
i += 1
    except Exception:
raise Exception('Row {}:{}\nhas invalid data. Unable to import.'.format(i, row))
return
def import_measurementvalues(fileName, fy):
reader, file, fileName = csvload(fileName)
try:
i = 1
for row in reader:
# Assumes Quarter is in the format "2020/21 Q1 (Jul - Sep)"
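            # e.g. (illustrative) "2020/21 Q1 (Jul - Sep)".split(maxsplit=1)[1]
            # yields "Q1 (Jul - Sep)", which is matched against Quarter.description.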
try:
quarter = Quarter.objects.get(fy=fy, description=row[0].split(maxsplit=1)[1])
except Quarter.DoesNotExist:
raise Exception('Quarter in row {}:{}\nnot found, unable to import.'.format(i, row))
try:
metric = SFMMetric.objects.get(fy=fy, metricID=row[2])
except SFMMetric.DoesNotExist:
raise Exception('SFMMetric in row {}:{}\nnot found, unable to import.'.format(i, row))
query = {
"quarter": quarter,
"region": row[1],
"sfmMetric": metric,
}
data = {
"quarter": quarter,
"region": row[1],
"sfmMetric": metric,
"planned": row[3] == 'TRUE' if row[3] else None,
"status": row[4].lower(),
"comment": row[5],
}
saverow(MeasurementValue, data, query)
i += 1
except Exception as e:
        raise Exception('Row {}:{}\nhas invalid data. Unable to import.\n{}'.format(i, row, e))
return
def process_upload_file(file_name, fileType, fy):
if fileType == 'sfmmetrics':
import_to_sfmmetrics(file_name, fy)
elif fileType == 'costcentres':
import_to_costcentres(file_name)
elif fileType == 'measurementvalues':
import_measurementvalues(file_name, fy)
def validate_file(file, fileType):
reader = csv.reader(file, dialect='excel')
if fileType == 'sfmmetrics':
return validate_sfmmetrics_header(reader)
if fileType == 'costcentres':
return validate_costcentre_header(reader)
if fileType == 'measurementvalues':
return validate_measurementvalues_header(reader)
return False
def validate_sfmmetrics_header(reader):
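    # Expected header row (illustrative, inferred from the checks below and the
    # importer): region,servicePriorityNo,metricID,descriptor,example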
row = next(reader)
if len(row) == COLS_SFM_METRICS:
sBad = ''
if row[0].strip() != 'region':
sBad += row[0] + ' : ' + 'region\n'
if row[1].strip() != 'servicePriorityNo':
sBad += row[1] + ' : ' + 'servicePriorityNo\n'
if row[2].strip() != 'metricID':
sBad += row[2] + ' : ' + 'metricID\n'
if row[3].strip() != 'descriptor':
sBad += row[3] + ' : ' + 'descriptor\n'
retVal = sBad == ''
if not retVal:
raise Exception('The column headings in the CSV file do not match the required headings\n{}'.format(sBad))
else:
        raise Exception('The number of columns in the CSV file does not match the required column count:\nExpected {}, found {}'.format(COLS_SFM_METRICS, len(row)))
return retVal
def validate_costcentre_header(reader):
row = next(reader)
if len(row) == COLS_COSTCENTRES:
sBad = ''
if row[0].strip() != 'costCentre':
sBad += row[0] + ' : ' + 'costCentre\n'
if row[1].strip() != 'name':
sBad += row[1] + ' : ' + 'name\n'
if row[2].strip() != 'region':
sBad += row[2] + ' : ' + 'region\n'
retVal = sBad == ''
if not retVal:
raise Exception('The column headings in the CSV file do not match the required headings\n{}'.format(sBad))
else:
        raise Exception('The number of columns in the CSV file does not match the required column count:\nExpected {}, found {}'.format(COLS_COSTCENTRES, len(row)))
return retVal
def validate_measurementvalues_header(reader):
row = next(reader)
sBad = ''
if row[0].strip() != 'quarter':
sBad += row[0] + ' : ' + 'quarter\n'
if row[1].strip() != 'region':
sBad += row[1] + ' : ' + 'region\n'
if row[2].strip() != 'sfmMetric':
sBad += row[2] + ' : ' + 'sfmMetric\n'
if row[3].strip() != 'planned':
sBad += row[3] + ' : ' + 'planned\n'
if row[4].strip() != 'status':
sBad += row[4] + ' : ' + 'status\n'
if row[5].strip() != 'comment':
sBad += row[5] + ' : ' + 'comment\n'
retVal = sBad == ''
if not retVal:
raise Exception('The column headings in the CSV file do not match the required headings\n' + sBad)
return retVal
|
{
"content_hash": "debf9296f9aa0f60dff3a4fac384a96a",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 169,
"avg_line_length": 33.28813559322034,
"alnum_prop": 0.5441276306856755,
"repo_name": "parksandwildlife/ibms",
"id": "9e0f60b841315b1455083c0f2126e71b109b784f",
"size": "5892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibms_project/sfm/sfm_file_funcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "36943"
},
{
"name": "Python",
"bytes": "208533"
},
{
"name": "Shell",
"bytes": "2386"
}
],
"symlink_target": ""
}
|
"""
WSGI config for main project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "main.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
{
"content_hash": "e96c210983180a82991469048eb11a5e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 26.5,
"alnum_prop": 0.7882599580712788,
"repo_name": "rajeshgupta14/pathend",
"id": "8373c0eb9b4780407d416acbac92470ab2c1ca3f",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "26412"
},
{
"name": "HTML",
"bytes": "86059"
},
{
"name": "JavaScript",
"bytes": "284160"
},
{
"name": "Python",
"bytes": "183208"
}
],
"symlink_target": ""
}
|
import contextlib
import os
from exceptions import Exception
def determine_jdk_directory(cluster):
"""
Return the directory where the JDK is installed. For example if the JDK is
located in /usr/java/jdk1.8_91, then this method will return the string
'jdk1.8_91'.
This method will throw an Exception if the number of JDKs matching the
/usr/java/jdk* pattern is not equal to 1.
:param cluster: cluster on which to search for the JDK directory
"""
number_of_jdks = cluster.exec_cmd_on_host(cluster.master, 'bash -c "ls -ld /usr/java/j*| wc -l"')
if int(number_of_jdks) != 1:
raise Exception('The number of JDK directories matching /usr/java/jdk* is not 1')
output = cluster.exec_cmd_on_host(cluster.master, 'ls -d /usr/java/j*')
return output.split(os.path.sep)[-1].strip('\n')
@contextlib.contextmanager
def relocate_jdk_directory(cluster, destination):
"""
Temporarily move the JDK to the destination directory
:param cluster: cluster object on which to relocate the JDK directory
:param destination: destination parent JDK directory, e.g. /tmp/
:returns the new full JDK directory, e.g. /tmp/jdk1.8_91
"""
# assume that Java is installed in the same folder on all nodes
jdk_directory = determine_jdk_directory(cluster)
source_jdk = os.path.join('/usr/java', jdk_directory)
destination_jdk = os.path.join(destination, jdk_directory)
for host in cluster.all_hosts():
cluster.exec_cmd_on_host(
host, "mv %s %s" % (source_jdk, destination_jdk), invoke_sudo=True)
yield destination_jdk
for host in cluster.all_hosts():
cluster.exec_cmd_on_host(
host, "mv %s %s" % (destination_jdk, source_jdk), invoke_sudo=True)
|
{
"content_hash": "b8d16896b8571b2eb30234d9d9a9e8b9",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 101,
"avg_line_length": 38.391304347826086,
"alnum_prop": 0.6834654586636466,
"repo_name": "prestodb/presto-admin",
"id": "edd66ad6c1373d28a843208da602ad459b55c544",
"size": "2333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/product/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7410"
},
{
"name": "Python",
"bytes": "695142"
},
{
"name": "Shell",
"bytes": "6738"
}
],
"symlink_target": ""
}
|
"""Tests for GRU V2 layer."""
import copy
import os
import shutil
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import recurrent as rnn_v1
from tensorflow.python.keras.layers import recurrent_v2 as rnn
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
# Global config for grappler setting that is used for graph mode test.
_rewrites = rewriter_config_pb2.RewriterConfig()
_rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
_rewrites.min_graph_nodes = -1
_graph_options = config_pb2.GraphOptions(rewrite_options=_rewrites)
_config = config_pb2.ConfigProto(graph_options=_graph_options)
@testing_utils.run_all_without_tensor_float_32('RNN GRU can use TF32 on GPU')
@keras_parameterized.run_all_keras_modes(config=_config)
class GRUV2Test(keras_parameterized.TestCase):
@parameterized.named_parameters(
('non_tan_activation', 'relu', 'sigmoid', 0, False, True, True),
('non_sigmoid_recur_activation', 'tanh', 'relu', 0, False, True, True),
('use_recurrent_dropout', 'tanh', 'sigmoid', 0.1, False, True, True),
('unroll', 'tanh', 'sigmoid', 0, True, True, True),
('not_use_bias', 'tanh', 'sigmoid', 0, False, False, True),
('not_reset_after', 'tanh', 'sigmoid', 0, False, True, False)
)
def test_could_use_defun_backend(self, activation, recurrent_activation,
recurrent_dropout, unroll, use_bias,
reset_after):
layer = rnn.GRU(1,
activation=activation,
recurrent_activation=recurrent_activation,
recurrent_dropout=recurrent_dropout,
unroll=unroll,
use_bias=use_bias,
reset_after=reset_after)
self.assertFalse(layer._could_use_gpu_kernel)
@testing_utils.run_v2_only
def test_use_on_default_activation_with_gpu_kernel(self):
layer = rnn.GRU(1, activation=nn.tanh)
self.assertTrue(layer._could_use_gpu_kernel)
layer = rnn.GRU(1, recurrent_activation=nn.sigmoid)
self.assertTrue(layer._could_use_gpu_kernel)
def test_keras_model_with_gru(self):
input_shape = 10
rnn_state_size = 8
output_shape = 8
timestep = 4
batch = 100
epoch = 10
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
y_train = np_utils.to_categorical(y_train, output_shape)
layer = rnn.GRU(rnn_state_size)
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
outputs = layer(inputs)
model = keras.models.Model(inputs, outputs)
model.compile('rmsprop', loss='mse')
model.fit(x_train, y_train, epochs=epoch)
model.evaluate(x_train, y_train)
model.predict(x_train)
def test_dynamic_behavior_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = rnn.GRU(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(gradient_descent.GradientDescentOptimizer(0.001), 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_stacking_GRU(self):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(rnn.GRU(10, return_sequences=True, unroll=False))
model.add(rnn.GRU(5, return_sequences=True, unroll=False))
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_GRU(self):
layer_class = rnn.GRU
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
@testing_utils.run_v2_only
def test_gru_v2_feature_parity_with_canonical_gru(self):
input_shape = 10
rnn_state_size = 8
timestep = 4
batch = 20
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=rnn_state_size,
random_seed=87654321)
y_train = np_utils.to_categorical(y_train, rnn_state_size)
# For the last batch item of the test data, we filter out the last
# timestep to simulate the variable length sequence and masking test.
x_train[-2:, -1, :] = 0.0
y_train[-2:] = 0
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
masked_input = keras.layers.Masking()(inputs)
gru_layer = rnn_v1.GRU(rnn_state_size,
recurrent_activation='sigmoid',
reset_after=True)
output = gru_layer(masked_input)
gru_model = keras.models.Model(inputs, output)
weights = gru_model.get_weights()
y_1 = gru_model.predict(x_train)
gru_model.compile('rmsprop', 'mse')
gru_model.fit(x_train, y_train)
y_2 = gru_model.predict(x_train)
with testing_utils.device(should_use_gpu=True):
cudnn_layer = rnn.GRU(rnn_state_size,
recurrent_activation='sigmoid',
reset_after=True)
cudnn_model = keras.models.Model(inputs, cudnn_layer(masked_input))
cudnn_model.set_weights(weights)
y_3 = cudnn_model.predict(x_train)
cudnn_model.compile('rmsprop', 'mse')
cudnn_model.fit(x_train, y_train)
y_4 = cudnn_model.predict(x_train)
self.assertAllClose(y_1, y_3, rtol=2e-5, atol=2e-5)
self.assertAllClose(y_2, y_4, rtol=2e-5, atol=2e-5)
@parameterized.named_parameters(
# test_name, use_bias, bias_initializer, activation
('normal', True, 'zeros'),
('no_bias', False, 'zeros'),
('random_bias', True, 'random_uniform'),
)
def test_gru_v2_model_save_load(self, use_bias, bias_initializer):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
batch = 10
timestep = 3
input_dim = 5
units = 2
x = np.random.random((batch, timestep, input_dim))
def build_model():
inputs = keras.layers.Input(
shape=[timestep, input_dim], dtype=dtypes.float32)
layer = rnn.GRU(
units,
use_bias=use_bias,
bias_initializer=bias_initializer)
output = layer(inputs)
return keras.models.Model(inputs, output), layer
model, layer = build_model()
y_ref = model.predict(x)
model.save_weights(h5_path)
cloned_model, new_layer = build_model()
cloned_model.load_weights(h5_path)
y = cloned_model.predict(x)
self.assertAllClose(y, y_ref)
self.assertAllClose(layer.get_weights(), new_layer.get_weights())
def test_gru_v2_output_on_multiple_kernel(self):
input_shape = 10
rnn_state_size = 8
timestep = 4
batch = 100
x_train = np.random.random((batch, timestep, input_shape))
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
with testing_utils.device(should_use_gpu=False):
layer = rnn.GRU(rnn_state_size)
output = layer(inputs)
cpu_model = keras.models.Model(inputs, output)
weights = cpu_model.get_weights()
y_1 = cpu_model.predict(x_train)
with testing_utils.device(should_use_gpu=True):
layer = rnn.GRU(rnn_state_size)
output = layer(inputs)
gpu_model = keras.models.Model(inputs, output)
gpu_model.set_weights(weights)
y_2 = gpu_model.predict(x_train)
# Note that CuDNN uses 'sigmoid' as activation, so the GRU V2 uses
# 'sigmoid' as default. Construct the canonical GRU with sigmoid to achieve
# the same output.
with testing_utils.device(should_use_gpu=True):
layer = rnn_v1.GRU(rnn_state_size,
recurrent_activation='sigmoid',
reset_after=True)
output = layer(inputs)
canonical_model = keras.models.Model(inputs, output)
canonical_model.set_weights(weights)
y_3 = canonical_model.predict(x_train)
self.assertAllClose(y_1, y_2, rtol=1e-5, atol=1e-5)
self.assertAllClose(y_2, y_3, rtol=1e-5, atol=1e-5)
@parameterized.named_parameters(
# test_name, time_major, go_backwards
('normal', False, False),
('time_major', True, False),
('go_backwards', False, True),
('both', True, True),
)
def test_time_major_and_go_backward(self, time_major, go_backwards):
input_shape = 10
rnn_state_size = 8
timestep = 4
batch = 100
x_train = np.random.random((batch, timestep, input_shape))
def build_model(layer_cls):
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
layer = layer_cls(rnn_state_size,
recurrent_activation='sigmoid',
time_major=time_major,
return_sequences=True,
go_backwards=go_backwards,
reset_after=True)
if time_major:
converted_input = keras.layers.Lambda(
lambda t: array_ops.transpose(t, [1, 0, 2]))(inputs)
outputs = layer(converted_input)
outputs = keras.layers.Lambda(
lambda t: array_ops.transpose(t, [1, 0, 2]))(outputs)
else:
outputs = layer(inputs)
return keras.models.Model(inputs, outputs)
gru_model = build_model(rnn_v1.GRU)
y_ref = gru_model.predict(x_train)
weights = gru_model.get_weights()
gru_v2_model = build_model(rnn.GRU)
gru_v2_model.set_weights(weights)
y = gru_v2_model.predict(x_train)
self.assertAllClose(y, y_ref)
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
def test_with_masking_layer_GRU(self):
layer_class = rnn.GRU
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.001))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
def test_masking_with_stacking_GRU(self):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(rnn.GRU(10, return_sequences=True, unroll=False))
model.add(rnn.GRU(5, return_sequences=True, unroll=False))
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_return_sequences_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.GRU,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message='Double type is not yet supported in ROCm')
@testing_utils.run_v2_only
def test_float64_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.GRU,
kwargs={'units': units,
'return_sequences': True,
'dtype': 'float64'},
input_shape=(num_samples, timesteps, embedding_dim),
input_dtype='float64')
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
def test_return_states_GRU(self):
layer_class = rnn.GRU
x = np.random.random((2, 3, 4))
y = np.abs(np.random.random((2, 5)))
s = np.abs(np.random.random((2, 5)))
inputs = keras.layers.Input(
shape=[3, 4], dtype=dtypes.float32)
masked = keras.layers.Masking()(inputs)
outputs, states = layer_class(units=5, return_state=True)(masked)
model = keras.models.Model(inputs, [outputs, states])
model.compile(loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.001))
model.fit(x, [y, s], epochs=1, batch_size=2, verbose=1)
def test_dropout_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.GRU,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
def test_constraints_GRU(self):
embedding_dim = 4
layer_class = rnn.GRU
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
@parameterized.parameters([0, 1, 2])
def test_implementation_mode_GRU(self, implementation_mode):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.GRU,
kwargs={'units': units,
'implementation': implementation_mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_regularizers_GRU(self):
embedding_dim = 4
layer_class = rnn.GRU
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if context.executing_eagerly():
self.assertEqual(len(layer.losses), 4)
else:
self.assertEqual(len(layer.get_losses_for(x)), 1)
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
def test_statefulness_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = rnn.GRU
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
layer.reset_states()
mix_padded_input = np.ones((num_samples, timesteps))
mix_padded_input[0, 1] = 0
mix_padded_input[1, 0] = 0
mix_padded_input[1, 2] = 0
out8 = model.predict(mix_padded_input)
self.assertAllClose(out7, out6, atol=1e-5)
self.assertAllClose(out8, out7, atol=1e-5)
def test_stateful_GRU_training(self):
# See b/123587692 for more context.
vocab_size = 20
embedding_dim = 10
batch_size = 8
timestep = 12
units = 5
x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
y = np.random.randint(0, vocab_size, size=(batch_size, timestep))
model = keras.Sequential([
keras.layers.Embedding(vocab_size, embedding_dim,
batch_input_shape=[batch_size, timestep]),
rnn.GRU(units, return_sequences=True, stateful=True),
keras.layers.Dense(vocab_size)
])
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, shuffle=False)
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
@testing_utils.run_v2_only
def test_explicit_device_with_go_backward_and_mask(self):
batch_size = 8
timestep = 7
masksteps = 5
units = 4
inputs = np.random.randn(batch_size, timestep, units).astype(np.float32)
mask = np.ones((batch_size, timestep)).astype(np.bool_)
mask[:, masksteps:] = 0
# Test for V1 behavior.
lstm_v1 = rnn_v1.GRU(units, return_sequences=True, go_backwards=True)
with testing_utils.device(should_use_gpu=True):
outputs_masked_v1 = lstm_v1(inputs, mask=constant_op.constant(mask))
outputs_trimmed_v1 = lstm_v1(inputs[:, :masksteps])
self.assertAllClose(outputs_masked_v1[:, -masksteps:], outputs_trimmed_v1)
# Test for V2 behavior.
lstm = rnn.GRU(units, return_sequences=True, go_backwards=True)
with testing_utils.device(should_use_gpu=True):
outputs_masked = lstm(inputs, mask=constant_op.constant(mask))
outputs_trimmed = lstm(inputs[:, :masksteps])
self.assertAllClose(outputs_masked[:, -masksteps:], outputs_trimmed)
@tf_test_util.enable_output_all_intermediates
def test_v1_session_behavior(self):
with ops.get_default_graph().as_default():
# See b/139132348 for more details.
x = np.random.uniform(size=(100, 4, 8))
y = np.random.uniform(size=(100, 1))
dataset = dataset_ops.Dataset.from_tensor_slices(
(x, y)).shuffle(100).batch(32)
inp = keras.layers.Input(shape=(4, 8))
layer = rnn.GRU(1)(inp)
layer = keras.layers.Dense(1)(layer)
model = keras.models.Model(inp, layer)
model.compile(loss='mse', optimizer='sgd')
model.fit(dataset)
def test_with_fully_masked_inputs(self):
num_samples = 8
timestep = 5
embedding_dim = 4
vocab_size = 20
units = 2
inputs = np.random.randint(0, vocab_size, size=(num_samples, timestep))
# Set the first inputs to be fully zero.
inputs[0, :] = 0.0
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
vocab_size,
embedding_dim,
mask_zero=True,
input_length=timestep,
batch_input_shape=(num_samples, timestep)))
layer = rnn.GRU(units)
model.add(layer)
model.compile(
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
# Make sure it doesn't crash with cudnn kernel.
model.predict(inputs)
# TODO (b/169895267): test with xla_gpu is disabled.
def test_deepcopy(self):
if not context.executing_eagerly():
self.skipTest('v2-only test')
original_layer = rnn.GRU(5)
copied_layer = copy.deepcopy(original_layer)
self.assertEqual(copied_layer.units, 5)
self.assertEqual(original_layer.get_config(), original_layer.get_config())
# Copy layer before layer call on inputs without weight initialization.
inputs = np.random.normal(size=[32, 10, 8]).astype(np.float32)
original_layer = rnn.GRU(4)
copied_layer = copy.deepcopy(original_layer)
outputs = original_layer(inputs)
copied_outputs = copied_layer(inputs)
self.assertNotAllClose(
self.evaluate(outputs), self.evaluate(copied_outputs))
# Copy layer after layer call on inputs with weight initialization.
original_layer = rnn.GRU(4)
outputs = original_layer(inputs)
copied_layer = copy.deepcopy(original_layer)
copied_outputs = copied_layer(inputs)
self.assertAllClose(self.evaluate(outputs), self.evaluate(copied_outputs))
@testing_utils.run_all_without_tensor_float_32('RNN GRU can use TF32 on GPU')
class GRULayerGradientTapeTest(keras_parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['eager']))
def test_in_tape(self):
with self.test_session(config=_config):
time_steps = 10
embedding_size = 11
gru_unit_size = 12
gru = rnn.GRU(gru_unit_size,
return_sequences=True,
return_state=True,
recurrent_activation='sigmoid',
recurrent_initializer='glorot_uniform')
x = random_ops.random_uniform([1, time_steps, embedding_size])
y = random_ops.random_uniform([1, gru_unit_size])
with backprop.GradientTape() as tape:
hidden_state = array_ops.zeros([1, gru_unit_size], dtype=dtypes.float32)
_, state = gru(x, initial_state=hidden_state)
loss = math_ops.reduce_mean(math_ops.square(state - y))
tape.gradient(loss, gru.variables)
@testing_utils.run_all_without_tensor_float_32('RNN GRU can use TF32 on GPU')
@keras_parameterized.run_all_keras_modes(config=_config)
class GRUGraphRewriteTest(keras_parameterized.TestCase):
input_shape = 10
output_shape = 8
rnn_state_size = 8
timestep = 4
batch = 100
epoch = 1
def _test_runtime_with_model(self, model):
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=self.batch,
test_samples=0,
input_shape=(self.timestep, self.input_shape),
num_classes=self.output_shape)
y_train = np_utils.to_categorical(y_train, self.output_shape)
model.compile(
optimizer='sgd',
loss=['categorical_crossentropy', None])
existing_loss = 0
for _ in range(self.epoch):
history = model.fit(x_train, y_train)
loss_value = history.history['loss'][0]
self.assertNotEqual(existing_loss, loss_value)
existing_loss = loss_value
_, runtime_value = model.predict(x_train)
if test.is_gpu_available():
self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
else:
self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
@testing_utils.run_v2_only
def test_GRU_runtime(self):
layer = rnn.GRU(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=dtypes.float32)
outputs, runtime = layer(inputs)
# Expand the runtime so that it is a 1D tensor instead of scalar.
    # TF model does not work with scalar model output, especially during
# aggregation.
runtime = keras.layers.Lambda(
lambda x: array_ops.expand_dims(x, axis=-1))(runtime)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
self._test_runtime_with_model(model)
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
@testing_utils.run_v2_only
def test_GRU_runtime_with_mask(self):
# Masking will affect which backend is selected based on whether the mask
# is strictly right padded.
layer = rnn.GRU(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=dtypes.float32)
masked_inputs = keras.layers.Masking()(inputs)
outputs, runtime = layer(masked_inputs)
# Expand the runtime so that it is a 1D tensor instead of scalar.
    # TF model does not work with scalar model output, especially during
# aggregation.
runtime = keras.layers.Lambda(
lambda x: array_ops.expand_dims(x, axis=-1))(runtime)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=self.batch,
test_samples=0,
input_shape=(self.timestep, self.input_shape),
num_classes=self.output_shape)
y_train = np_utils.to_categorical(y_train, self.output_shape)
model.compile(
optimizer='sgd',
loss=['categorical_crossentropy', None],
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x_train, y_train)
# Verify unpadded data.
_, runtime_value = model.predict(x_train)
if test.is_gpu_available():
self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
else:
self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
# Update x/y to be right padded by setting the last timestep to 0
x_train[:, -1, :] = 0
y_train[:, -1] = 0
_, runtime_value = model.predict(x_train)
if test.is_gpu_available():
self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
else:
self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
# Further update x/y to be mix padded (masks in the middle), and verify
# only cpu kernel can be selected.
x_train[:, -3, :] = 0
y_train[:, -3] = 0
_, runtime_value = model.predict(x_train)
self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
@testing_utils.run_v2_only
def test_GRU_runtime_with_cond(self):
# This test is to demonstrate the graph rewrite of grappler plugin under
# the condition that the function returns different number of internal
# states.
layer = rnn.GRU(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=dtypes.float32)
zeros = array_ops.zeros([self.batch, self.output_shape])
dummy_runtime = rnn._runtime(rnn._RUNTIME_UNKNOWN)
a = constant_op.constant(0)
b = constant_op.constant(1)
# Will always run the GRU layer.
outputs, runtime = control_flow_ops.cond(
gen_math_ops.less(a, b),
lambda: layer(inputs),
lambda: (zeros, dummy_runtime))
# Expand the runtime so that it is a 1D tensor instead of scalar.
    # TF model does not work with scalar model output, especially during
# aggregation.
runtime = keras.layers.Lambda(
lambda x: array_ops.expand_dims(x, axis=-1))(runtime)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
self._test_runtime_with_model(model)
if __name__ == '__main__':
test.main()
|
{
"content_hash": "2610fa09e075faddb9599c7db63bc9cc",
"timestamp": "",
"source": "github",
"line_count": 816,
"max_line_length": 80,
"avg_line_length": 36.245098039215684,
"alnum_prop": 0.6551257776575602,
"repo_name": "Intel-Corporation/tensorflow",
"id": "177bed37ab568bbd63509c67e0c9bc421d30a25d",
"size": "30265",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/layers/gru_v2_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
}
|
import logging
try:
from urllib import urlencode
from urlparse import urlsplit, parse_qs, urlunsplit
except ImportError:
from urllib.parse import urlsplit, parse_qs, urlunsplit, urlencode
from Cryptodome.PublicKey import RSA
from django.contrib.auth.views import (
redirect_to_login,
logout,
)
import django
if django.VERSION >= (1, 11):
from django.urls import reverse
else:
from django.core.urlresolvers import reverse
from django.contrib.auth import logout as django_user_logout
from django.http import JsonResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.decorators.http import require_http_methods
from django.views.generic import View
from jwkest import long_to_base64
from oidc_provider.lib.claims import StandardScopeClaims
from oidc_provider.lib.endpoints.authorize import AuthorizeEndpoint
from oidc_provider.lib.endpoints.token import TokenEndpoint
from oidc_provider.lib.errors import (
AuthorizeError,
ClientIdError,
RedirectUriError,
TokenError,
UserAuthError)
from oidc_provider.lib.utils.common import (
redirect,
get_site_url,
get_issuer,
)
from oidc_provider.lib.utils.oauth2 import protected_resource_view
from oidc_provider.lib.utils.token import client_id_from_id_token
from oidc_provider.models import (
Client,
RESPONSE_TYPE_CHOICES,
RSAKey,
)
from oidc_provider import settings
from oidc_provider import signals
logger = logging.getLogger(__name__)
OIDC_TEMPLATES = settings.get('OIDC_TEMPLATES')
class AuthorizeView(View):
def get(self, request, *args, **kwargs):
authorize = AuthorizeEndpoint(request)
try:
authorize.validate_params()
if request.user.is_authenticated():
                # Check if there's a hook set.
hook_resp = settings.get('OIDC_AFTER_USERLOGIN_HOOK', import_str=True)(
request=request, user=request.user,
client=authorize.client)
if hook_resp:
return hook_resp
if 'login' in authorize.params['prompt']:
if 'none' in authorize.params['prompt']:
raise AuthorizeError(authorize.params['redirect_uri'], 'login_required', authorize.grant_type)
else:
django_user_logout(request)
next_page = self.strip_prompt_login(request.get_full_path())
return redirect_to_login(next_page, settings.get('OIDC_LOGIN_URL'))
if 'select_account' in authorize.params['prompt']:
# TODO: see how we can support multiple accounts for the end-user.
if 'none' in authorize.params['prompt']:
raise AuthorizeError(authorize.params['redirect_uri'], 'account_selection_required', authorize.grant_type)
else:
django_user_logout(request)
return redirect_to_login(request.get_full_path(), settings.get('OIDC_LOGIN_URL'))
if {'none', 'consent'}.issubset(authorize.params['prompt']):
raise AuthorizeError(authorize.params['redirect_uri'], 'consent_required', authorize.grant_type)
implicit_flow_resp_types = set(['id_token', 'id_token token'])
allow_skipping_consent = (
authorize.client.client_type != 'public' or
authorize.client.response_type in implicit_flow_resp_types)
if not authorize.client.require_consent and (
allow_skipping_consent and
'consent' not in authorize.params['prompt']):
return redirect(authorize.create_response_uri())
if authorize.client.reuse_consent:
                # Check if the user previously gave consent.
if authorize.client_has_user_consent() and (
allow_skipping_consent and
'consent' not in authorize.params['prompt']):
return redirect(authorize.create_response_uri())
if 'none' in authorize.params['prompt']:
raise AuthorizeError(authorize.params['redirect_uri'], 'consent_required', authorize.grant_type)
# Generate hidden inputs for the form.
context = {
'params': authorize.params,
}
hidden_inputs = render_to_string('oidc_provider/hidden_inputs.html', context)
# Remove `openid` from scope list
# since we don't need to print it.
if 'openid' in authorize.params['scope']:
authorize.params['scope'].remove('openid')
context = {
'client': authorize.client,
'hidden_inputs': hidden_inputs,
'params': authorize.params,
'scopes': authorize.get_scopes_information(),
}
return render(request, OIDC_TEMPLATES['authorize'], context)
else:
if 'none' in authorize.params['prompt']:
raise AuthorizeError(authorize.params['redirect_uri'], 'login_required', authorize.grant_type)
if 'login' in authorize.params['prompt']:
next_page = self.strip_prompt_login(request.get_full_path())
return redirect_to_login(next_page, settings.get('OIDC_LOGIN_URL'))
return redirect_to_login(request.get_full_path(), settings.get('OIDC_LOGIN_URL'))
except (ClientIdError, RedirectUriError) as error:
context = {
'error': error.error,
'description': error.description,
}
return render(request, OIDC_TEMPLATES['error'], context)
except AuthorizeError as error:
uri = error.create_uri(
authorize.params['redirect_uri'],
authorize.params['state'])
return redirect(uri)
def post(self, request, *args, **kwargs):
authorize = AuthorizeEndpoint(request)
try:
authorize.validate_params()
if not request.POST.get('allow'):
signals.user_decline_consent.send(self.__class__, user=request.user, client=authorize.client, scope=authorize.params['scope'])
raise AuthorizeError(authorize.params['redirect_uri'],
'access_denied',
authorize.grant_type)
signals.user_accept_consent.send(self.__class__, user=request.user, client=authorize.client, scope=authorize.params['scope'])
# Save the user consent given to the client.
authorize.set_client_user_consent()
uri = authorize.create_response_uri()
return redirect(uri)
except (AuthorizeError) as error:
uri = error.create_uri(
authorize.params['redirect_uri'],
authorize.params['state'])
return redirect(uri)
@staticmethod
def strip_prompt_login(path):
"""
Strips 'login' from the 'prompt' query parameter.
"""
uri = urlsplit(path)
query_params = parse_qs(uri.query)
if 'login' in query_params['prompt']:
query_params['prompt'].remove('login')
if not query_params['prompt']:
del query_params['prompt']
uri = uri._replace(query=urlencode(query_params, doseq=True))
return urlunsplit(uri)
class TokenView(View):
def post(self, request, *args, **kwargs):
token = TokenEndpoint(request)
try:
token.validate_params()
dic = token.create_response_dic()
return TokenEndpoint.response(dic)
except TokenError as error:
return TokenEndpoint.response(error.create_dict(), status=400)
except UserAuthError as error:
return TokenEndpoint.response(error.create_dict(), status=403)
@require_http_methods(['GET', 'POST'])
@protected_resource_view(['openid'])
def userinfo(request, *args, **kwargs):
"""
    Create a dictionary with all the requested claims about the End-User.
See: http://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse
    Return a dictionary.
"""
token = kwargs['token']
dic = {
'sub': token.id_token.get('sub'),
}
standard_claims = StandardScopeClaims(token)
dic.update(standard_claims.create_response_dic())
if settings.get('OIDC_EXTRA_SCOPE_CLAIMS'):
extra_claims = settings.get('OIDC_EXTRA_SCOPE_CLAIMS', import_str=True)(token)
dic.update(extra_claims.create_response_dic())
response = JsonResponse(dic, status=200)
response['Access-Control-Allow-Origin'] = '*'
response['Cache-Control'] = 'no-store'
response['Pragma'] = 'no-cache'
return response
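# A sketch of a possible userinfo response body; the claim names below are
# standard OIDC claims and the values are purely illustrative, depending on
# the scopes granted to the token:
# {
#     "sub": "1",
#     "given_name": "John",
#     "family_name": "Doe",
#     "email": "johndoe@example.com"
# }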
class ProviderInfoView(View):
def get(self, request, *args, **kwargs):
dic = dict()
site_url = get_site_url(request=request)
dic['issuer'] = get_issuer(site_url=site_url, request=request)
dic['authorization_endpoint'] = site_url + reverse('oidc_provider:authorize')
dic['token_endpoint'] = site_url + reverse('oidc_provider:token')
dic['userinfo_endpoint'] = site_url + reverse('oidc_provider:userinfo')
dic['end_session_endpoint'] = site_url + reverse('oidc_provider:end-session')
types_supported = [x[0] for x in RESPONSE_TYPE_CHOICES]
dic['response_types_supported'] = types_supported
dic['jwks_uri'] = site_url + reverse('oidc_provider:jwks')
dic['id_token_signing_alg_values_supported'] = ['HS256', 'RS256']
# See: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes
dic['subject_types_supported'] = ['public']
dic['token_endpoint_auth_methods_supported'] = ['client_secret_post',
'client_secret_basic']
if settings.get('OIDC_SESSION_MANAGEMENT_ENABLE'):
dic['check_session_iframe'] = site_url + reverse('oidc_provider:check-session-iframe')
response = JsonResponse(dic)
response['Access-Control-Allow-Origin'] = '*'
return response
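# A trimmed example of the discovery document this view returns, assuming the
# provider is reachable at https://example.com and the endpoint paths resolved
# by the URL configuration look like the ones below (all values illustrative):
# {
#     "issuer": "https://example.com",
#     "authorization_endpoint": "https://example.com/authorize",
#     "token_endpoint": "https://example.com/token",
#     "userinfo_endpoint": "https://example.com/userinfo",
#     "end_session_endpoint": "https://example.com/end-session",
#     "response_types_supported": ["code", "id_token", "id_token token", ...],
#     "jwks_uri": "https://example.com/jwks",
#     "id_token_signing_alg_values_supported": ["HS256", "RS256"],
#     "subject_types_supported": ["public"],
#     "token_endpoint_auth_methods_supported": ["client_secret_post",
#                                               "client_secret_basic"]
# }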
class JwksView(View):
def get(self, request, *args, **kwargs):
dic = dict(keys=[])
for rsakey in RSAKey.objects.all():
public_key = RSA.importKey(rsakey.key).publickey()
dic['keys'].append({
'kty': 'RSA',
'alg': 'RS256',
'use': 'sig',
'kid': rsakey.kid,
'n': long_to_base64(public_key.n),
'e': long_to_base64(public_key.e),
})
response = JsonResponse(dic)
response['Access-Control-Allow-Origin'] = '*'
return response
class EndSessionView(View):
def get(self, request, *args, **kwargs):
id_token_hint = request.GET.get('id_token_hint', '')
post_logout_redirect_uri = request.GET.get('post_logout_redirect_uri', '')
state = request.GET.get('state', '')
client = None
next_page = settings.get('OIDC_LOGIN_URL')
after_end_session_hook = settings.get('OIDC_AFTER_END_SESSION_HOOK', import_str=True)
if id_token_hint:
client_id = client_id_from_id_token(id_token_hint)
try:
client = Client.objects.get(client_id=client_id)
if post_logout_redirect_uri in client.post_logout_redirect_uris:
if state:
uri = urlsplit(post_logout_redirect_uri)
query_params = parse_qs(uri.query)
query_params['state'] = state
uri = uri._replace(query=urlencode(query_params, doseq=True))
next_page = urlunsplit(uri)
else:
next_page = post_logout_redirect_uri
except Client.DoesNotExist:
pass
after_end_session_hook(
request=request,
id_token=id_token_hint,
post_logout_redirect_uri=post_logout_redirect_uri,
state=state,
client=client,
next_page=next_page
)
return logout(request, next_page=next_page)
class CheckSessionIframeView(View):
@method_decorator(xframe_options_exempt)
def dispatch(self, request, *args, **kwargs):
return super(CheckSessionIframeView, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return render(request, 'oidc_provider/check_session_iframe.html', kwargs)
|
{
"content_hash": "64fb13f5dae8e398ae3031048fa96872",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 142,
"avg_line_length": 37.439655172413794,
"alnum_prop": 0.593445391050733,
"repo_name": "bunnyinc/django-oidc-provider",
"id": "f1b90f09958fae9c238884ca0eadabc356107912",
"size": "13029",
"binary": false,
"copies": "3",
"ref": "refs/heads/v0.5.x",
"path": "oidc_provider/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8706"
},
{
"name": "Python",
"bytes": "157098"
}
],
"symlink_target": ""
}
|
"""
This module provides a client class for SMS.
"""
import copy
import logging
import json
from baidubce import utils
from baidubce.auth import bce_v1_signer
from baidubce.bce_base_client import BceBaseClient
from baidubce.http import bce_http_client
from baidubce.http import handler
from baidubce.http import http_headers
from baidubce.http import http_methods
from baidubce.utils import required
from baidubce.services import sms
import httplib
from baidubce.exception import BceClientError
from baidubce.exception import BceServerError
from baidubce.bce_client_configuration import BceClientConfiguration
_logger = logging.getLogger(__name__)
def _parse_result(http_response, response):
if http_response.status / 100 == httplib.CONTINUE / 100:
raise BceClientError('Can not handle 1xx http status code')
bse = None
body = http_response.read()
if body:
d = json.loads(body)
if 'message' in d and 'code' in d and 'requestId' in d:
bse = BceServerError(d['message'], code=d['code'], request_id=d['requestId'])
elif http_response.status / 100 == httplib.OK / 100:
response.__dict__.update(json.loads(body, \
object_hook=utils.dict_to_python_object).__dict__)
http_response.close()
return True
elif http_response.status / 100 == httplib.OK / 100:
return True
if bse is None:
bse = BceServerError(http_response.reason, request_id=response.metadata.bce_request_id)
bse.status_code = http_response.status
raise bse
class SmsClient(BceBaseClient):
"""
Sms sdk client
"""
def __init__(self, config=None):
if config is not None:
self._check_config_type(config)
BceBaseClient.__init__(self, config)
@required(config=BceClientConfiguration)
def _check_config_type(self, config):
return True
@required(template_id=(str, unicode),
receiver_list=list,
content_var_dict=dict)
def send_message(self, template_id, receiver_list, content_var_dict, config=None):
"""
send a short message to a group of users
:param template_id: template id used to send this message
:type template_id: string or unicode
        :param receiver_list: receivers to which this message will be sent
        :type receiver_list: list
:param content_var_dict: variable values to be replaced
:type content_var_dict: dict
:param config: None
:type config: BceClientConfiguration
:return: message result as following format
{
"messageId": "123456789abefghiqwertioplkjhgfds",
"sendStat": {
"sendCount":2,
"successCount":1,
"failList":["13800138001", "13800138000"]
}
}
:rtype: baidubce.bce_response.BceResponse
"""
data = {
'templateId': template_id,
'receiver': receiver_list,
'contentVar': json.dumps(content_var_dict)
}
return self._send_request(http_methods.POST, 'message', \
body=json.dumps(data), config=config)
@required(message_id=(str, unicode))
def query_message_detail(self, message_id, config=None):
"""
Get the message detail.
:param message_id: the id of message to be queried
:type message_id: string or unicode
:param config: None
:type config: BceClientConfiguration
:return: detailed message as following format
{
'messageId': '123456789abefghiqwertioplkjhgfds',
'content': 'this is JDMALL, your code is 123456',
'receiver': ['13800138000'],
'sendTime': '2014-06-12T10:08:22Z'
}
:rtype: baidubce.bce_response.BceResponse
"""
return self._send_request(http_methods.GET, 'message', message_id, config=config)
@required(name=(str, unicode), content=(str, unicode))
def create_template(self, name, content, config=None):
"""
Create template with specific name and content
:param name: the name of template
:type name: string or unicode
:param content: the content of template,such as 'this is ${APP}, your code is ${VID}'
:type content: string or unicode
:param config: None
:type config: BceClientConfiguration
:return: create result as following format
{
'templateId': 'brn:bce:sms:cn-n1:123456:smsTpl:6nHdNumZ4ZtGaKO'
}
:rtype: baidubce.bce_response.BceResponse
"""
data = {'name': name, 'content': content}
return self._send_request(http_methods.POST, 'template', \
body=json.dumps(data), config=config)
@required(template_id=(str, unicode))
def delete_template(self, template_id, config=None):
"""
delete an existing template by given id
:param template_id: id of template to be deleted
:type template_id: string or unicode
:param config: None
:type config: BceClientConfiguration
:return: None
"""
self._send_request(http_methods.DELETE, 'template', template_id, config=config)
@required(template_id=(str, unicode))
def get_template_detail(self, template_id, config=None):
"""
get detailed information of template by id
:param template_id: the template id to be queried
:type template_id: string or unicode
:param config: None
:type config: BceClientConfiguration
:return: detailed template as following format
{
'templateId': 'smsTpl:6nHdNumZ4ZtGaKO',
            'name': 'verifyID',
'content': 'this is ${APP}, your code is ${VID}',
            'status': 'VALID',
'createTime': '2014-06-12T10:08:22Z',
'updateTime': '2014-06-12T10:08:22Z'
}
:rtype: baidubce.bce_response.BceResponse
"""
return self._send_request(http_methods.GET, 'template', template_id, config=config)
def get_template_list(self, config=None):
"""
query all templates
:param config: None
:type config: BceClientConfiguration
:return: template list as following format
{
"templateList": {
'templateId': 'smsTpl:6nHdNumZ4ZtGaKO',
            'name': 'verifyID',
'content': 'this is ${APP}, your code is ${VID}',
            'status': 'VALID',
'createTime': '2014-06-12T10:08:22Z',
'updateTime': '2014-06-12T10:08:22Z'
},
...
}
:rtype: baidubce.bce_response.BceResponse
"""
return self._send_request(http_methods.GET, 'template', config=config)
def query_quota(self, config=None):
"""
query quota information of user
:param config: None
:type config: BceClientConfiguration
:return: quota information as following format
{
'maxSendPerDay': 10000,
'maxReceivePerPhoneNumberDay': 10,
'sentToday': 8000
}
:rtype: baidubce.bce_response.BceResponse
"""
return self._send_request(http_methods.GET, 'quota', config=config)
@required(receiver=(str, unicode))
def stat_receiver(self, receiver, config=None):
"""
query quota information of receiver
:param receiver: receiver to be queried
:type receiver: string or unicode
:param config: None
:type config: BceClientConfiguration
:return: quota information as following format
{
'maxReceivePerPhoneNumberDay': 10,
'receivedToday': 8
}
:rtype: baidubce.bce_response.BceResponse
"""
return self._send_request(http_methods.GET, 'receiver', receiver, config=config)
@staticmethod
def _get_path(config, function_name=None, key=None):
return utils.append_uri(sms.URL_PREFIX, function_name, key)
@staticmethod
def _bce_sms_sign(credentials, http_method, path, headers, params,
timestamp=0, expiration_in_seconds=1800,
headers_to_sign=None):
headers_to_sign_list = ["host",
"content-md5",
"content-length",
"content-type"]
if headers_to_sign is None or len(headers_to_sign) == 0:
headers_to_sign = []
for k in headers:
k_lower = k.strip().lower()
if k_lower.startswith(http_headers.BCE_PREFIX) or k_lower in headers_to_sign_list:
headers_to_sign.append(k_lower)
headers_to_sign.sort()
else:
for k in headers:
k_lower = k.strip().lower()
if k_lower.startswith(http_headers.BCE_PREFIX):
headers_to_sign.append(k_lower)
headers_to_sign.sort()
return bce_v1_signer.sign(credentials,
http_method,
path,
headers,
params,
timestamp,
expiration_in_seconds,
headers_to_sign)
def _merge_config(self, config):
if config is None:
return self.config
else:
self._check_config_type(config)
new_config = copy.copy(self.config)
new_config.merge_non_none_values(config)
return new_config
def _send_request(
self, http_method, function_name=None, key=None,
body=None, headers=None, params=None,
config=None,
body_parser=None):
config = self._merge_config(config)
path = SmsClient._get_path(config, function_name, key)
if body_parser is None:
body_parser = _parse_result
if headers is None:
headers = {'Accept': '*/*', 'Content-Type': 'application/json;charset=utf-8'}
return bce_http_client.send_request(
config, SmsClient._bce_sms_sign, [body_parser],
http_method, path, body, headers, params)
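# A minimal usage sketch (not part of the SDK): the endpoint, access keys and
# template id below are placeholders that must be replaced with real values.
#
#     from baidubce.auth.bce_credentials import BceCredentials
#     from baidubce.bce_client_configuration import BceClientConfiguration
#
#     config = BceClientConfiguration(
#         credentials=BceCredentials('your-access-key-id', 'your-secret-access-key'),
#         endpoint='sms.bj.baidubce.com')
#     client = SmsClient(config)
#     response = client.send_message('your-template-id',
#                                    ['13800138000'],
#                                    {'code': '123456'})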
|
{
"content_hash": "6720bad7b54b7f995778c833e07b8b85",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 98,
"avg_line_length": 35.032051282051285,
"alnum_prop": 0.5534309240622141,
"repo_name": "smices/mWorkerService",
"id": "b4a67d7265368658363aced5533c4aeef6f8b535",
"size": "11561",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/lib/baidubce/services/sms/sms_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "327675"
},
{
"name": "Shell",
"bytes": "4057"
}
],
"symlink_target": ""
}
|
"""Heat API Server.
An OpenStack ReST API to Heat.
"""
import eventlet
eventlet.monkey_patch(os=False)
import sys
from oslo_config import cfg
import oslo_i18n as i18n
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from oslo_service import systemd
import six
from heat.common import config
from heat.common.i18n import _LI
from heat.common import messaging
from heat.common import profiler
from heat.common import wsgi
from heat import version
i18n.enable_lazy()
LOG = logging.getLogger('heat.api')
def main():
try:
logging.register_options(cfg.CONF)
cfg.CONF(project='heat', prog='heat-api',
version=version.version_info.version_string())
logging.setup(cfg.CONF, 'heat-api')
messaging.setup()
app = config.load_paste_app()
port = cfg.CONF.heat_api.bind_port
host = cfg.CONF.heat_api.bind_host
LOG.info(_LI('Starting Heat REST API on %(host)s:%(port)s'),
{'host': host, 'port': port})
profiler.setup('heat-api', host)
gmr.TextGuruMeditation.setup_autorun(version)
server = wsgi.Server('heat-api', cfg.CONF.heat_api)
server.start(app, default_port=port)
systemd.notify_once()
server.wait()
except RuntimeError as e:
msg = six.text_type(e)
sys.exit("ERROR: %s" % msg)
|
{
"content_hash": "d103c2b9240f392e94516cb875802916",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 68,
"avg_line_length": 26.73076923076923,
"alnum_prop": 0.6611510791366907,
"repo_name": "pratikmallya/heat",
"id": "b5a751814243717d703e10bb160ab5c7a72b52b0",
"size": "1987",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "heat/cmd/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6929579"
},
{
"name": "Shell",
"bytes": "33092"
}
],
"symlink_target": ""
}
|
"""Support for Homekit climate devices."""
import logging
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
STATE_COOL, STATE_HEAT, STATE_IDLE, SUPPORT_OPERATION_MODE,
SUPPORT_TARGET_TEMPERATURE, SUPPORT_TARGET_HUMIDITY)
from homeassistant.const import ATTR_TEMPERATURE, STATE_OFF, TEMP_CELSIUS
from . import KNOWN_DEVICES, HomeKitEntity
_LOGGER = logging.getLogger(__name__)
# Map of Homekit operation modes to hass modes
MODE_HOMEKIT_TO_HASS = {
0: STATE_OFF,
1: STATE_HEAT,
2: STATE_COOL,
}
# Map of hass operation modes to homekit modes
MODE_HASS_TO_HOMEKIT = {v: k for k, v in MODE_HOMEKIT_TO_HASS.items()}
DEFAULT_VALID_MODES = list(MODE_HOMEKIT_TO_HASS)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Homekit climate."""
if discovery_info is not None:
accessory = hass.data[KNOWN_DEVICES][discovery_info['serial']]
add_entities([HomeKitClimateDevice(accessory, discovery_info)], True)
class HomeKitClimateDevice(HomeKitEntity, ClimateDevice):
"""Representation of a Homekit climate device."""
def __init__(self, *args):
"""Initialise the device."""
self._state = None
self._current_mode = None
self._valid_modes = []
self._current_temp = None
self._target_temp = None
self._current_humidity = None
self._target_humidity = None
super().__init__(*args)
def get_characteristic_types(self):
"""Define the homekit characteristics the entity cares about."""
# pylint: disable=import-error
from homekit.model.characteristics import CharacteristicsTypes
return [
CharacteristicsTypes.HEATING_COOLING_CURRENT,
CharacteristicsTypes.HEATING_COOLING_TARGET,
CharacteristicsTypes.TEMPERATURE_CURRENT,
CharacteristicsTypes.TEMPERATURE_TARGET,
CharacteristicsTypes.RELATIVE_HUMIDITY_CURRENT,
CharacteristicsTypes.RELATIVE_HUMIDITY_TARGET,
]
def _setup_heating_cooling_target(self, characteristic):
self._features |= SUPPORT_OPERATION_MODE
if 'valid-values' in characteristic:
valid_values = [
val for val in DEFAULT_VALID_MODES
if val in characteristic['valid-values']
]
else:
valid_values = DEFAULT_VALID_MODES
if 'minValue' in characteristic:
valid_values = [
val for val in valid_values
if val >= characteristic['minValue']
]
if 'maxValue' in characteristic:
valid_values = [
val for val in valid_values
if val <= characteristic['maxValue']
]
self._valid_modes = [
MODE_HOMEKIT_TO_HASS[mode] for mode in valid_values
]
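        # For example (illustrative): a characteristic reporting
        # {'minValue': 0, 'maxValue': 1} narrows valid_values to [0, 1], so the
        # exposed operation modes become ['off', 'heat'] and 'cool' is hidden.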
def _setup_temperature_target(self, characteristic):
self._features |= SUPPORT_TARGET_TEMPERATURE
def _setup_relative_humidity_target(self, characteristic):
self._features |= SUPPORT_TARGET_HUMIDITY
def _update_heating_cooling_current(self, value):
self._state = MODE_HOMEKIT_TO_HASS.get(value)
def _update_heating_cooling_target(self, value):
self._current_mode = MODE_HOMEKIT_TO_HASS.get(value)
def _update_temperature_current(self, value):
self._current_temp = value
def _update_temperature_target(self, value):
self._target_temp = value
def _update_relative_humidity_current(self, value):
self._current_humidity = value
def _update_relative_humidity_target(self, value):
self._target_humidity = value
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temp = kwargs.get(ATTR_TEMPERATURE)
characteristics = [{'aid': self._aid,
'iid': self._chars['temperature.target'],
'value': temp}]
await self._accessory.put_characteristics(characteristics)
async def async_set_humidity(self, humidity):
"""Set new target humidity."""
characteristics = [{'aid': self._aid,
'iid': self._chars['relative-humidity.target'],
'value': humidity}]
await self._accessory.put_characteristics(characteristics)
async def async_set_operation_mode(self, operation_mode):
"""Set new target operation mode."""
characteristics = [{'aid': self._aid,
'iid': self._chars['heating-cooling.target'],
'value': MODE_HASS_TO_HOMEKIT[operation_mode]}]
await self._accessory.put_characteristics(characteristics)
@property
def state(self):
"""Return the current state."""
# If the device reports its operating mode as off, it sometimes doesn't
# report a new state.
if self._current_mode == STATE_OFF:
return STATE_OFF
if self._state == STATE_OFF and self._current_mode != STATE_OFF:
return STATE_IDLE
return self._state
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temp
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temp
@property
def current_humidity(self):
"""Return the current humidity."""
return self._current_humidity
@property
def target_humidity(self):
"""Return the humidity we try to reach."""
return self._target_humidity
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
return self._current_mode
@property
def operation_list(self):
"""Return the list of available operation modes."""
return self._valid_modes
@property
def supported_features(self):
"""Return the list of supported features."""
return self._features
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
|
{
"content_hash": "f53657d666ffbb5bbdc8615030e44b83",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 79,
"avg_line_length": 34.32786885245902,
"alnum_prop": 0.6203438395415473,
"repo_name": "auduny/home-assistant",
"id": "2cbd8f6d7000d7b28f085e8a3ef1551d788e6542",
"size": "6282",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homekit_controller/climate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "407"
},
{
"name": "Python",
"bytes": "15129018"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
"""Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# License: BSD
from abc import ABCMeta, abstractmethod
from operator import attrgetter
from functools import update_wrapper
import numpy as np
from ..utils import safe_indexing
from ..externals import six
from ..base import BaseEstimator
__all__ = ['if_delegate_has_method']
class _BaseComposition(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Handles parameter management for classifiers composed of named estimators.
"""
@abstractmethod
def __init__(self):
pass
def _get_params(self, attr, deep=True):
out = super(_BaseComposition, self).get_params(deep=deep)
if not deep:
return out
estimators = getattr(self, attr)
out.update(estimators)
for name, estimator in estimators:
if hasattr(estimator, 'get_params'):
for key, value in six.iteritems(
estimator.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _set_params(self, attr, **params):
# Ensure strict ordering of parameter setting:
# 1. All steps
if attr in params:
setattr(self, attr, params.pop(attr))
# 2. Step replacement
items = getattr(self, attr)
names = []
if items:
names, _ = zip(*items)
for name in list(six.iterkeys(params)):
if '__' not in name and name in names:
self._replace_estimator(attr, name, params.pop(name))
# 3. Step parameters and other initialisation arguments
super(_BaseComposition, self).set_params(**params)
return self
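        # For example (illustrative), a composition that stores its estimators
        # under ``steps`` handles ``set_params(steps=[...], svm=SVC(), svm__C=10)``
        # in exactly that order: the whole list first, then the named
        # replacement, then the nested parameter.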
def _replace_estimator(self, attr, name, new_val):
# assumes `name` is a valid estimator name
new_estimators = list(getattr(self, attr))
for i, (estimator_name, _) in enumerate(new_estimators):
if estimator_name == name:
new_estimators[i] = (name, new_val)
break
setattr(self, attr, new_estimators)
def _validate_names(self, names):
if len(set(names)) != len(names):
raise ValueError('Names provided are not unique: '
'{0!r}'.format(list(names)))
invalid_names = set(names).intersection(self.get_params(deep=False))
if invalid_names:
raise ValueError('Estimator names conflict with constructor '
'arguments: {0!r}'.format(sorted(invalid_names)))
invalid_names = [name for name in names if '__' in name]
if invalid_names:
raise ValueError('Estimator names must not contain __: got '
'{0!r}'.format(invalid_names))
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if none of the delegates (specified in ``delegate_names``) is an attribute
of the base object or the first found delegate does not have an attribute
``attribute_name``.
This allows ducktyping of the decorated method based on
``delegate.attribute_name``. Here ``delegate`` is the first item in
``delegate_names`` for which ``hasattr(object, delegate) is True``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, delegate_names, attribute_name):
self.fn = fn
self.delegate_names = delegate_names
self.attribute_name = attribute_name
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
for delegate_name in self.delegate_names:
try:
delegate = attrgetter(delegate_name)(obj)
except AttributeError:
continue
else:
getattr(delegate, self.attribute_name)
break
else:
attrgetter(self.delegate_names[-1])(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
Parameters
----------
delegate : string, list of strings or tuple of strings
Name of the sub-estimator that can be accessed as an attribute of the
base object. If a list or a tuple of names are provided, the first
sub-estimator that is an attribute of the base object will be used.
"""
if isinstance(delegate, list):
delegate = tuple(delegate)
if not isinstance(delegate, tuple):
delegate = (delegate,)
return lambda fn: _IffHasAttrDescriptor(fn, delegate,
attribute_name=fn.__name__)
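# A minimal usage sketch (``Wrapper`` is a hypothetical meta-estimator, not part
# of this module):
#
#     class Wrapper(BaseEstimator):
#         def __init__(self, estimator):
#             self.estimator = estimator
#
#         @if_delegate_has_method(delegate='estimator')
#         def predict(self, X):
#             return self.estimator.predict(X)
#
# ``hasattr(Wrapper(est), 'predict')`` is then True only when ``est`` itself
# exposes a ``predict`` method.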
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels.
Slice X, y according to indices for cross-validation, but take care of
precomputed kernel-matrices or pairwise affinities / distances.
If ``estimator._pairwise is True``, X needs to be square and
we slice rows and columns. If ``train_indices`` is not None,
we slice rows using ``indices`` (assumed the test set) and columns
using ``train_indices``, indicating the training set.
Labels y will always be indexed only along the first axis.
Parameters
----------
estimator : object
Estimator to determine whether we should slice only rows or rows and
columns.
X : array-like, sparse matrix or iterable
Data to be indexed. If ``estimator._pairwise is True``,
this needs to be a square array-like or sparse matrix.
y : array-like, sparse matrix or iterable
Targets to be indexed.
indices : array of int
Rows to select from X and y.
If ``estimator._pairwise is True`` and ``train_indices is None``
then ``indices`` will also be used to slice columns.
train_indices : array of int or None, default=None
If ``estimator._pairwise is True`` and ``train_indices is not None``,
then ``train_indices`` will be use to slice the columns of X.
Returns
-------
X_subset : array-like, sparse matrix or list
Indexed data.
y_subset : array-like, sparse matrix or list
Indexed targets.
"""
if getattr(estimator, "_pairwise", False):
if not hasattr(X, "shape"):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
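# A small illustration of the pairwise case (shapes only, values assumed): with
# ``estimator._pairwise == True`` and a precomputed kernel ``K`` of shape
# (n_samples, n_samples), ``_safe_split(estimator, K, y, test_idx, train_idx)``
# returns ``K[np.ix_(test_idx, train_idx)]`` -- test rows against training
# columns -- together with ``y[test_idx]``.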
|
{
"content_hash": "43ff2f74c4343aee907485b7791cbcc6",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 81,
"avg_line_length": 36.90952380952381,
"alnum_prop": 0.6154044639401367,
"repo_name": "vortex-ape/scikit-learn",
"id": "49b059b3245951c27498a6284d88c8318bc18a28",
"size": "7751",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "sklearn/utils/metaestimators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "6351428"
},
{
"name": "Shell",
"bytes": "8687"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import six
import mock
import pytest
from django.core.exceptions import SuspiciousOperation
from sentry.constants import VERSION_LENGTH
from uuid import UUID
from sentry.coreapi import (
APIError,
APIUnauthorized,
Auth,
ClientApiHelper,
CspApiHelper,
APIForbidden,
)
from sentry.event_manager import EventManager
from sentry.interfaces.base import get_interface
from sentry.testutils import TestCase
class BaseAPITest(TestCase):
helper_cls = ClientApiHelper
def setUp(self):
self.user = self.create_user('coreapi@example.com')
self.team = self.create_team(name='Foo')
self.project = self.create_project(team=self.team)
self.pk = self.project.key_set.get_or_create()[0]
self.helper = self.helper_cls(agent='Awesome Browser', ip_address='198.51.100.0')
def validate_and_normalize(self, data, request_env=None):
data = self.helper.validate_data(data)
return EventManager(data).normalize(request_env=request_env)
class AuthFromRequestTest(BaseAPITest):
def test_valid(self):
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value, biz=baz'}
request.GET = {}
result = self.helper.auth_from_request(request)
assert result.public_key == 'value'
def test_valid_missing_space(self):
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value,biz=baz'}
request.GET = {}
result = self.helper.auth_from_request(request)
assert result.public_key == 'value'
def test_valid_ignore_case(self):
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'SeNtRy sentry_key=value, biz=baz'}
request.GET = {}
result = self.helper.auth_from_request(request)
assert result.public_key == 'value'
def test_invalid_header_defers_to_GET(self):
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'foobar'}
request.GET = {'sentry_version': '1', 'foo': 'bar'}
result = self.helper.auth_from_request(request)
assert result.version == '1'
def test_invalid_legacy_header_defers_to_GET(self):
request = mock.Mock()
request.META = {'HTTP_AUTHORIZATION': 'foobar'}
request.GET = {'sentry_version': '1', 'foo': 'bar'}
result = self.helper.auth_from_request(request)
assert result.version == '1'
def test_invalid_header_bad_token(self):
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentryfoo'}
request.GET = {}
with self.assertRaises(APIUnauthorized):
self.helper.auth_from_request(request)
def test_invalid_header_missing_pair(self):
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry foo'}
request.GET = {}
with self.assertRaises(APIUnauthorized):
self.helper.auth_from_request(request)
def test_invalid_malformed_value(self):
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value,,biz=baz'}
request.GET = {}
with self.assertRaises(APIUnauthorized):
self.helper.auth_from_request(request)
def test_multiple_auth_suspicious(self):
request = mock.Mock()
request.GET = {'sentry_version': '1', 'foo': 'bar'}
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value, biz=baz'}
with pytest.raises(SuspiciousOperation):
self.helper.auth_from_request(request)
class ProjectIdFromAuthTest(BaseAPITest):
def test_invalid_if_missing_key(self):
self.assertRaises(APIUnauthorized, self.helper.project_id_from_auth, Auth({}))
def test_valid_with_key(self):
auth = Auth({'sentry_key': self.pk.public_key})
result = self.helper.project_id_from_auth(auth)
self.assertEquals(result, self.project.id)
def test_invalid_key(self):
auth = Auth({'sentry_key': 'z'})
self.assertRaises(APIUnauthorized, self.helper.project_id_from_auth, auth)
def test_invalid_secret(self):
auth = Auth({'sentry_key': self.pk.public_key, 'sentry_secret': 'z'})
self.assertRaises(APIUnauthorized, self.helper.project_id_from_auth, auth)
def test_nonascii_key(self):
auth = Auth({'sentry_key': '\xc3\xbc'})
self.assertRaises(APIUnauthorized, self.helper.project_id_from_auth, auth)
class ValidateDataTest(BaseAPITest):
@mock.patch('uuid.uuid4', return_value=UUID('031667ea1758441f92c7995a428d2d14'))
def test_empty_event_id(self, uuid4):
data = self.validate_and_normalize({
'event_id': '',
})
assert data['event_id'] == '031667ea1758441f92c7995a428d2d14'
@mock.patch('uuid.uuid4', return_value=UUID('031667ea1758441f92c7995a428d2d14'))
def test_missing_event_id(self, uuid4):
data = self.validate_and_normalize({})
assert data['event_id'] == '031667ea1758441f92c7995a428d2d14'
@mock.patch('uuid.uuid4', return_value=UUID('031667ea1758441f92c7995a428d2d14'))
def test_invalid_event_id(self, uuid4):
data = self.validate_and_normalize({
'event_id': 'a' * 33,
})
assert data['event_id'] == '031667ea1758441f92c7995a428d2d14'
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'value_too_long'
assert data['errors'][0]['name'] == 'event_id'
assert data['errors'][0]['value'] == 'a' * 33
data = self.validate_and_normalize({
'event_id': 'xyz',
})
assert data['event_id'] == '031667ea1758441f92c7995a428d2d14'
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'invalid_data'
assert data['errors'][0]['name'] == 'event_id'
assert data['errors'][0]['value'] == 'xyz'
def test_unknown_attribute(self):
data = self.validate_and_normalize({
'message': 'foo',
'foo': 'bar',
})
assert 'foo' not in data
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'invalid_attribute'
assert data['errors'][0]['name'] == 'foo'
def test_invalid_interface_name(self):
data = self.validate_and_normalize({
'message': 'foo',
'foo.baz': 'bar',
})
assert 'foo.baz' not in data
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'invalid_attribute'
assert data['errors'][0]['name'] == 'foo.baz'
def test_invalid_interface_import_path(self):
data = self.validate_and_normalize({
'message': 'foo',
'sentry.interfaces.Exception2': 'bar',
})
assert 'sentry.interfaces.Exception2' not in data
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'invalid_attribute'
assert data['errors'][0]['name'] == 'sentry.interfaces.Exception2'
def test_does_expand_list(self):
data = self.validate_and_normalize({
'message': 'foo',
'exception':
[{
'type': 'ValueError',
'value': 'hello world',
'module': 'foo.bar',
}]
})
assert 'sentry.interfaces.Exception' in data
def test_log_level_as_string(self):
data = self.validate_and_normalize({
'message': 'foo',
'level': 'error',
})
assert data['level'] == 40
def test_invalid_log_level(self):
data = self.validate_and_normalize({
'message': 'foo',
'level': 'foobar',
})
assert data['level'] == 40
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'invalid_data'
assert data['errors'][0]['name'] == 'level'
assert data['errors'][0]['value'] == 'foobar'
def test_tags_as_string(self):
data = self.validate_and_normalize({
'message': 'foo',
'tags': 'bar',
})
assert data['tags'] == []
def test_tags_with_spaces(self):
data = self.validate_and_normalize({
'message': 'foo',
'tags': {
'foo bar': 'baz bar'
},
})
assert data['tags'] == [('foo-bar', 'baz bar')]
def test_tags_out_of_bounds(self):
data = self.validate_and_normalize({
'message': 'foo',
'tags': {
'f' * 33: 'value',
'foo': 'v' * 201,
'bar': 'value'
},
})
assert data['tags'] == [('bar', 'value')]
assert len(data['errors']) == 2
def test_tags_as_invalid_pair(self):
data = self.validate_and_normalize({
'message': 'foo',
'tags': [('foo', 'bar'), ('biz', 'baz', 'boz')],
})
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'invalid_data'
assert data['errors'][0]['name'] == 'tags'
assert data['errors'][0]['value'] == [('foo', 'bar'), ('biz', 'baz', 'boz')]
def test_reserved_tags(self):
data = self.validate_and_normalize({
'message': 'foo',
'tags': [('foo', 'bar'), ('release', 'abc123')],
})
assert data['tags'] == [('foo', 'bar')]
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'invalid_data'
assert data['errors'][0]['name'] == 'tags'
assert data['errors'][0]['value'] == ('release', 'abc123')
def test_tag_value(self):
data = self.validate_and_normalize({
'message': 'foo',
'tags': [('foo', 'b\nar'), ('biz', 'baz')],
})
assert data['tags'] == [('biz', 'baz')]
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'invalid_data'
assert data['errors'][0]['name'] == 'tags'
assert data['errors'][0]['value'] == ('foo', 'b\nar')
def test_extra_as_string(self):
data = self.validate_and_normalize({
'message': 'foo',
'extra': 'bar',
})
assert data['extra'] == {}
def test_release_too_long(self):
data = self.validate_and_normalize({
'release': 'a' * (VERSION_LENGTH + 1),
})
assert not data.get('release')
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'value_too_long'
assert data['errors'][0]['name'] == 'release'
assert data['errors'][0]['value'] == 'a' * (VERSION_LENGTH + 1)
def test_release_as_non_string(self):
data = self.validate_and_normalize({
'release': 42,
})
assert data.get('release') == '42'
def test_distribution_too_long(self):
data = self.validate_and_normalize({
'release': 'a' * 62,
'dist': 'b' * 65,
})
assert not data.get('dist')
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'value_too_long'
assert data['errors'][0]['name'] == 'dist'
assert data['errors'][0]['value'] == 'b' * 65
def test_distribution_bad_char(self):
data = self.validate_and_normalize({
'release': 'a' * 62,
'dist': '^%',
})
assert not data.get('dist')
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'invalid_data'
assert data['errors'][0]['name'] == 'dist'
assert data['errors'][0]['value'] == '^%'
def test_distribution_strip(self):
data = self.validate_and_normalize({
'release': 'a' * 62,
'dist': ' foo ',
})
assert data.get('dist') == 'foo'
def test_distribution_as_non_string(self):
data = self.validate_and_normalize({
'release': '42',
'dist': 23,
})
assert data.get('release') == '42'
assert data.get('dist') == '23'
def test_distribution_no_release(self):
data = self.validate_and_normalize({
'dist': 23,
})
assert data.get('dist') is None
def test_valid_platform(self):
data = self.validate_and_normalize({
'platform': 'python',
})
assert data.get('platform') == 'python'
def test_no_platform(self):
data = self.validate_and_normalize({})
assert data.get('platform') == 'other'
def test_invalid_platform(self):
data = self.validate_and_normalize({
'platform': 'foobar',
})
assert data.get('platform') == 'other'
def test_environment_too_long(self):
data = self.validate_and_normalize({
'environment': 'a' * 65,
})
assert not data.get('environment')
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'value_too_long'
assert data['errors'][0]['name'] == 'environment'
assert data['errors'][0]['value'] == 'a' * 65
def test_environment_as_non_string(self):
data = self.validate_and_normalize({
'environment': 42,
})
assert data.get('environment') == '42'
def test_time_spent_too_large(self):
data = self.validate_and_normalize({
'time_spent': 2147483647 + 1,
})
assert not data.get('time_spent')
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'value_too_long'
assert data['errors'][0]['name'] == 'time_spent'
assert data['errors'][0]['value'] == 2147483647 + 1
def test_time_spent_invalid(self):
data = self.validate_and_normalize({
'time_spent': 'lol',
})
assert not data.get('time_spent')
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'invalid_data'
assert data['errors'][0]['name'] == 'time_spent'
assert data['errors'][0]['value'] == 'lol'
def test_time_spent_non_int(self):
data = self.validate_and_normalize({
'time_spent': '123',
})
assert data['time_spent'] == 123
def test_fingerprints(self):
data = self.validate_and_normalize({
'fingerprint': '2012-01-01T10:30:45',
})
assert not data.get('fingerprint')
assert data['errors'][0]['type'] == 'invalid_data'
assert data['errors'][0]['name'] == 'fingerprint'
data = self.validate_and_normalize({
'fingerprint': ['foo', ['bar']],
})
assert not data.get('fingerprint')
assert data['errors'][0]['type'] == 'invalid_data'
assert data['errors'][0]['name'] == 'fingerprint'
data = self.validate_and_normalize({
'fingerprint': ['{{default}}', 1, 'bar', 4.5],
})
assert data.get('fingerprint') == ['{{default}}', '1', 'bar', '4.5']
assert len(data['errors']) == 0
def test_messages(self):
# Just 'message': wrap it in interface
data = self.validate_and_normalize({
'message': 'foo is bar',
})
assert 'message' not in data
assert data['sentry.interfaces.Message'] == {'message': 'foo is bar'}
# both 'message' and interface with no 'formatted' value, put 'message'
# into 'formatted'.
data = self.validate_and_normalize({
'message': 'foo is bar',
'sentry.interfaces.Message': {
'message': 'something else',
}
})
assert 'message' not in data
assert data['sentry.interfaces.Message'] == {
'message': 'something else',
'formatted': 'foo is bar'
}
# both 'message' and complete interface, 'message' is discarded
data = self.validate_and_normalize({
'message': 'foo is bar',
'sentry.interfaces.Message': {
'message': 'something else',
'formatted': 'something else formatted',
}
})
assert 'message' not in data
assert len(data['errors']) == 0
assert data['sentry.interfaces.Message'] == {
'message': 'something else',
'formatted': 'something else formatted'
}
@pytest.mark.skip(reason="Message behavior that didn't make a lot of sense.")
def test_messages_old_behavior(self):
# both 'message' and complete valid interface but interface has the same
# value for both keys so the 'formatted' value is discarded and ends up
# being replaced with 'message'
data = self.validate_and_normalize({
'message': 'foo is bar',
'sentry.interfaces.Message': {
'message': 'something else',
'formatted': 'something else',
}
})
assert 'message' not in data
assert len(data['errors']) == 0
assert data['sentry.interfaces.Message'] == {
'message': 'something else',
'formatted': 'foo is bar'
}
# interface discarded as invalid, replaced by new interface containing
# wrapped 'message'
data = self.validate_and_normalize({
'message': 'foo is bar',
'sentry.interfaces.Message': {
'invalid': 'invalid',
}
})
assert 'message' not in data
assert len(data['errors']) == 1
assert data['sentry.interfaces.Message'] == {
'message': 'foo is bar'
}
class SafelyLoadJSONStringTest(BaseAPITest):
def test_valid_payload(self):
data = self.helper.safely_load_json_string('{"foo": "bar"}')
assert data == {'foo': 'bar'}
def test_invalid_json(self):
with self.assertRaises(APIError):
self.helper.safely_load_json_string('{')
def test_unexpected_type(self):
with self.assertRaises(APIError):
self.helper.safely_load_json_string('1')
class DecodeDataTest(BaseAPITest):
def test_valid_data(self):
data = self.helper.decode_data('foo')
assert data == u'foo'
assert type(data) == six.text_type
def test_invalid_data(self):
with self.assertRaises(APIError):
self.helper.decode_data('\x99')
class GetInterfaceTest(TestCase):
def test_does_not_let_through_disallowed_name(self):
with self.assertRaises(ValueError):
get_interface('subprocess')
def test_allows_http(self):
from sentry.interfaces.http import Http
result = get_interface('sentry.interfaces.Http')
assert result is Http
result = get_interface('request')
assert result is Http
class EnsureHasIpTest(BaseAPITest):
def test_with_remote_addr(self):
inp = {
'sentry.interfaces.Http': {
'url': 'http://example.com/',
'env': {
'REMOTE_ADDR': '192.168.0.1',
},
},
}
out = inp.copy()
self.validate_and_normalize(out, {'client_ip': '127.0.0.1'})
assert out['sentry.interfaces.Http']['env']['REMOTE_ADDR'] == '192.168.0.1'
def test_with_user_ip(self):
inp = {
'sentry.interfaces.User': {
'ip_address': '192.168.0.1',
},
}
out = inp.copy()
self.validate_and_normalize(out, {'client_ip': '127.0.0.1'})
assert out['sentry.interfaces.User']['ip_address'] == '192.168.0.1'
def test_with_user_auto_ip(self):
out = {
'sentry.interfaces.User': {
'ip_address': '{{auto}}',
},
}
self.validate_and_normalize(out, {'client_ip': '127.0.0.1'})
assert out['sentry.interfaces.User']['ip_address'] == '127.0.0.1'
out = {
'user': {
'ip_address': '{{auto}}',
},
}
self.validate_and_normalize(out, {'client_ip': '127.0.0.1'})
assert out['sentry.interfaces.User']['ip_address'] == '127.0.0.1'
def test_without_ip_values(self):
out = {
'platform': 'javascript',
'sentry.interfaces.User': {},
'sentry.interfaces.Http': {
'url': 'http://example.com/',
'env': {},
},
}
self.validate_and_normalize(out, {'client_ip': '127.0.0.1'})
assert out['sentry.interfaces.User']['ip_address'] == '127.0.0.1'
def test_without_any_values(self):
out = {
'platform': 'javascript',
}
self.validate_and_normalize(out, {'client_ip': '127.0.0.1'})
assert out['sentry.interfaces.User']['ip_address'] == '127.0.0.1'
def test_with_http_auto_ip(self):
out = {
'sentry.interfaces.Http': {
'url': 'http://example.com/',
'env': {
'REMOTE_ADDR': '{{auto}}',
},
},
}
self.validate_and_normalize(out, {'client_ip': '127.0.0.1'})
assert out['sentry.interfaces.Http']['env']['REMOTE_ADDR'] == '127.0.0.1'
def test_with_all_auto_ip(self):
out = {
'sentry.interfaces.User': {
'ip_address': '{{auto}}',
},
'sentry.interfaces.Http': {
'url': 'http://example.com/',
'env': {
'REMOTE_ADDR': '{{auto}}',
},
},
}
self.validate_and_normalize(out, {'client_ip': '127.0.0.1'})
assert out['sentry.interfaces.Http']['env']['REMOTE_ADDR'] == '127.0.0.1'
assert out['sentry.interfaces.User']['ip_address'] == '127.0.0.1'
class CspApiHelperTest(BaseAPITest):
helper_cls = CspApiHelper
def test_validate_basic(self):
report = {
"document-uri":
"http://45.55.25.245:8123/csp",
"referrer":
"http://example.com",
"violated-directive":
"img-src https://45.55.25.245:8123/",
"effective-directive":
"img-src",
"original-policy":
"default-src https://45.55.25.245:8123/; child-src https://45.55.25.245:8123/; connect-src https://45.55.25.245:8123/; font-src https://45.55.25.245:8123/; img-src https://45.55.25.245:8123/; media-src https://45.55.25.245:8123/; object-src https://45.55.25.245:8123/; script-src https://45.55.25.245:8123/; style-src https://45.55.25.245:8123/; form-action https://45.55.25.245:8123/; frame-ancestors 'none'; plugin-types 'none'; report-uri http://45.55.25.245:8123/csp-report?os=OS%20X&device=&browser_version=43.0&browser=chrome&os_version=Lion",
"blocked-uri":
"http://google.com",
"status-code":
200,
"_meta": {
"release": "abc123",
}
}
result = self.validate_and_normalize(report)
assert result['logger'] == 'csp'
assert result['release'] == 'abc123'
assert result['errors'] == []
assert 'sentry.interfaces.Message' in result
assert 'culprit' in result
assert result['tags'] == [
('effective-directive', 'img-src'),
('blocked-uri', 'http://google.com'),
]
assert result['sentry.interfaces.User'] == {'ip_address': '198.51.100.0'}
assert result['sentry.interfaces.Http']['url'] == 'http://45.55.25.245:8123/csp'
assert dict(result['sentry.interfaces.Http']['headers']) == {
'User-Agent': 'Awesome Browser',
'Referer': 'http://example.com'
}
@mock.patch('sentry.interfaces.csp.Csp.to_python', mock.Mock(side_effect=Exception))
def test_validate_raises_invalid_interface(self):
with self.assertRaises(APIForbidden):
self.validate_and_normalize({})
def test_tags_out_of_bounds(self):
report = {
"document-uri":
"http://45.55.25.245:8123/csp",
"referrer":
"http://example.com",
"violated-directive":
"img-src https://45.55.25.245:8123/",
"effective-directive":
"img-src",
"original-policy":
"default-src https://45.55.25.245:8123/; child-src https://45.55.25.245:8123/; connect-src https://45.55.25.245:8123/; font-src https://45.55.25.245:8123/; img-src https://45.55.25.245:8123/; media-src https://45.55.25.245:8123/; object-src https://45.55.25.245:8123/; script-src https://45.55.25.245:8123/; style-src https://45.55.25.245:8123/; form-action https://45.55.25.245:8123/; frame-ancestors 'none'; plugin-types 'none'; report-uri http://45.55.25.245:8123/csp-report?os=OS%20X&device=&browser_version=43.0&browser=chrome&os_version=Lion",
"blocked-uri":
"v" * 201,
"status-code":
200,
"_meta": {
"release": "abc123",
}
}
result = self.validate_and_normalize(report)
assert result['tags'] == [
('effective-directive', 'img-src'),
]
assert len(result['errors']) == 1
def test_tag_value(self):
report = {
"document-uri":
"http://45.55.25.245:8123/csp",
"referrer":
"http://example.com",
"violated-directive":
"img-src https://45.55.25.245:8123/",
"effective-directive":
"img-src",
"original-policy":
"default-src https://45.55.25.245:8123/; child-src https://45.55.25.245:8123/; connect-src https://45.55.25.245:8123/; font-src https://45.55.25.245:8123/; img-src https://45.55.25.245:8123/; media-src https://45.55.25.245:8123/; object-src https://45.55.25.245:8123/; script-src https://45.55.25.245:8123/; style-src https://45.55.25.245:8123/; form-action https://45.55.25.245:8123/; frame-ancestors 'none'; plugin-types 'none'; report-uri http://45.55.25.245:8123/csp-report?os=OS%20X&device=&browser_version=43.0&browser=chrome&os_version=Lion",
"blocked-uri":
"http://google.com",
"status-code":
200,
"_meta": {
"release": "abc123",
}
}
result = self.validate_and_normalize(report)
assert result['tags'] == [
('effective-directive', 'img-src'),
('blocked-uri', 'http://google.com'),
]
assert len(result['errors']) == 0
def test_no_tags(self):
report = {
"document-uri":
"http://45.55.25.245:8123/csp",
"referrer":
"http://example.com",
"violated-directive":
"img-src https://45.55.25.245:8123/",
"effective-directive":
"v" * 201,
"original-policy":
"default-src https://45.55.25.245:8123/; child-src https://45.55.25.245:8123/; connect-src https://45.55.25.245:8123/; font-src https://45.55.25.245:8123/; img-src https://45.55.25.245:8123/; media-src https://45.55.25.245:8123/; object-src https://45.55.25.245:8123/; script-src https://45.55.25.245:8123/; style-src https://45.55.25.245:8123/; form-action https://45.55.25.245:8123/; frame-ancestors 'none'; plugin-types 'none'; report-uri http://45.55.25.245:8123/csp-report?os=OS%20X&device=&browser_version=43.0&browser=chrome&os_version=Lion",
"blocked-uri":
"http://goo\ngle.com",
"status-code":
200,
"_meta": {
"release": "abc123",
}
}
result = self.validate_and_normalize(report)
assert result['tags'] == []
assert len(result['errors']) == 2
|
{
"content_hash": "39696157fb8a2d127c350264356b5e65",
"timestamp": "",
"source": "github",
"line_count": 742,
"max_line_length": 571,
"avg_line_length": 37.594339622641506,
"alnum_prop": 0.5430722351675927,
"repo_name": "gencer/sentry",
"id": "d1cd513b8dab2645ad28df4b53a747738f7d4f82",
"size": "27920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/coreapi/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "318167"
},
{
"name": "HTML",
"bytes": "281885"
},
{
"name": "JavaScript",
"bytes": "2342569"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "8393"
},
{
"name": "Python",
"bytes": "28161647"
},
{
"name": "Ruby",
"bytes": "4233"
},
{
"name": "Shell",
"bytes": "2149"
}
],
"symlink_target": ""
}
|
"""Provides a way of easily overriding labels in skin.conf files with other languages.
Useful for skin designers who want to use multiple skin.conf files and easily make
the skins available in multiple languages.
1) Place this file in bin/user/translategenerator.py
2) Store each language specific file in skins/languages/[language].conf
Take a look at an existing one to see how they work.
3) Add this section to each skin.conf:
[Language]
#
# Set a language below and labels will be overridden with any that are specified in
# skins/languages/[language].conf
#
language = espanol
4) Replace any instances of CheetahGenerator or ImageGenerator in skin.conf with
these translation classes:
[Generators]
# Change from this:
# generator_list = weewx.cheetahgenerator.CheetahGenerator, weewx.imagegenerator.ImageGenerator, weewx.reportengine.CopyGenerator
# To this:
generator_list = user.translategenerator.CheetahGeneratorTranslated, user.translategenerator.ImageGeneratorTranslated, weewx.reportengine.CopyGenerator
"""
import syslog
import os.path
from configobj import ConfigObj
from weewx.imagegenerator import ImageGenerator
from weewx.cheetahgenerator import CheetahGenerator
class ImageGeneratorTranslated(ImageGenerator):
"""Overwrite skin.conf dictionary with language specific entries"""
def setup(self):
language_dict = _get_language_dict(self.skin_dict, self.config_dict)
if language_dict is not None:
self.skin_dict.merge(language_dict)
ImageGenerator.setup(self)
class CheetahGeneratorTranslated(CheetahGenerator):
"""Overwrite skin.conf dictionary with language specific entries"""
def setup(self):
language_dict = _get_language_dict(self.skin_dict, self.config_dict)
if language_dict is not None:
self.skin_dict.merge(language_dict)
CheetahGenerator.setup(self)
def _get_language_dict(skin_dict, config_dict):
"""Look for this section in the skin.conf dictionary:
[Language]
language = espanol
Returns None if not found, or a link to the corresponding language.conf
dictionary."""
language_dict = None
if 'Language' in skin_dict:
if 'language' in skin_dict['Language']:
language = skin_dict['Language']['language']
syslog.syslog(syslog.LOG_INFO, "%s: Language is %s" % (os.path.basename(__file__), language))
# Figure out where the language config files can be found
language_config_path = os.path.join(config_dict['WEEWX_ROOT'], config_dict['StdReport']['SKIN_ROOT'],
'languages', "%s.conf" % language)
try:
language_dict = ConfigObj(language_config_path)
except:
syslog.syslog(syslog.LOG_INFO, "%s: Could not import lanuguage dictionary %s" %
os.path.basename(__file__), language_config_path)
language_dict = None
if language_dict is None:
syslog.syslog(syslog.LOG_DEBUG, "%s: No language override specified." % (os.path.basename(__file__)))
return language_dict
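# A sketch of what a language file might contain (the section and key names are
# assumptions and must mirror whatever the skin's own skin.conf defines):
#
#     skins/languages/espanol.conf:
#
#     [Labels]
#         [[Generic]]
#             outTemp = Temperatura exterior
#             barometer = Presion barometrica
#
# With ``language = espanol`` set in skin.conf, these entries are merged over
# the skin dictionary before the Cheetah and image generators run.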
|
{
"content_hash": "d15c90c5e66832e279438defa6c5b376",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 159,
"avg_line_length": 34.84782608695652,
"alnum_prop": 0.6834061135371179,
"repo_name": "tony-rasskazov/meteo",
"id": "1968dc096ed6d73e0bbab046c047059809079592",
"size": "3332",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "weewx/bin/user/translategenerator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "12966"
},
{
"name": "HTML",
"bytes": "5186"
},
{
"name": "JavaScript",
"bytes": "9056"
},
{
"name": "Python",
"bytes": "3806"
}
],
"symlink_target": ""
}
|
c.NotebookApp.allow_password_change = True
## Allow requests where the Host header doesn't point to a local server
#
# By default, requests get a 403 forbidden response if the 'Host' header shows
# that the browser thinks it's on a non-local domain. Setting this option to
# True disables this check.
#
# This protects against 'DNS rebinding' attacks, where a remote web server
# serves you a page and then changes its DNS to send later requests to a local
# IP, bypassing same-origin checks.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are allowed as local, along
# with hostnames configured in local_hostnames.
#c.NotebookApp.allow_remote_access = False
## Whether to allow the user to run the notebook as root.
#c.NotebookApp.allow_root = False
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
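## Example (an illustrative path, not a Jupyter default): persist logins across
# restarts by pointing the server at a secret file you manage yourself, e.g.
# c.NotebookApp.cookie_secret_file = '/srv/jupyter/notebook_cookie_secret'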
## Override URL shown to users.
#
# Replace actual URL, including protocol, address, port and base URL, with the
# given value when displaying URL to the users. Do not change the actual
# connection URL. If authentication token is enabled, the token is added to the
# custom URL automatically.
#
# This option is intended to be used when the URL to display to the user cannot
# be determined reliably by the Jupyter notebook server (proxified or
# containerized setups for example).
#c.NotebookApp.custom_display_url = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
#   token), or
# - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## handlers that should be loaded at higher priority than the default services
#c.NotebookApp.extra_services = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Extra keyword arguments to pass to `get_secure_cookie`. See tornado's
# get_secure_cookie docs for details.
#c.NotebookApp.get_secure_cookie_kwargs = {}
## Deprecated: Use minified JS file or not, mainly used during dev to avoid JS
# recompilation.
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which stream output can be sent on iopub before
# it is limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
#c.NotebookApp.ip = 'localhost'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The API of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## Hostnames to allow as local when allow_remote_access is False.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are automatically accepted as
# local as well.
#c.NotebookApp.local_hostnames = ['localhost']
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Sets the maximum allowed size of the client request body, specified in the
# Content-Length request header field. If the size in a request exceeds the
# configured value, a malformed HTTP message is returned to the client.
#
# Note: max_body_size is applied even in streaming mode.
#c.NotebookApp.max_body_size = 536870912
## Gets or sets the maximum amount of memory, in bytes, that is allocated for
# use by the buffer manager.
#c.NotebookApp.max_buffer_size = 536870912
## Gets or sets a lower bound on the open file handles process resource limit.
# This may need to be increased if you run into an OSError: [Errno 24] Too many
# open files. This is not applicable when running on Windows.
#c.NotebookApp.min_open_files_limit = 0
## Dict of Python modules to load as notebook server extensions. Entry values
# can be used to enable and disable the loading of the extensions. The
# extensions will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
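# A minimal sketch of enabling a server extension by module name (the
# 'jupyterlab' entry below is illustrative and assumes that package is
# installed; set a value to False to keep an extension listed but disabled):
#
# c.NotebookApp.nbserver_extensions = {'jupyterlab': True}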
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = ''
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#c.NotebookApp.password = ''
c.NotebookApp.password = u'sha1:0892b1ba8e65:a67442560a5ec5092819bf31cfa54de102d94b8b'
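# A minimal sketch of how a hash like the one set above can be produced
# (assumes the notebook package is importable; the passphrase is illustrative):
#
# from notebook.auth import passwd
# passwd('my-secret-passphrase')  # returns e.g. 'sha1:<salt>:<hash>'
#
# Paste the returned string into c.NotebookApp.password as done above.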
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
#c.NotebookApp.password_required = False
## The port the notebook server will listen on.
#c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## If True, display a button in the dashboard to quit (shutdown the notebook
# server).
#c.NotebookApp.quit_button = True
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Shut down the server after N seconds with no kernels or terminals running and
# no activity. This can be used together with culling idle kernels
# (MappingKernelManager.cull_idle_timeout) to shutdown the notebook server when
# it's not in use. This is not precisely timed: it may shut down up to a minute
# later. 0 (the default) disables this automatic shutdown.
#c.NotebookApp.shutdown_no_activity_timeout = 0
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Set to False to disable terminals.
#
# This does *not* make the notebook server more secure by itself. Anything the
# user can do in a terminal, they can also do in a notebook.
#
# Terminals may also be automatically disabled if the terminado package is not
# available.
#c.NotebookApp.terminals_enabled = True
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether or not to trust X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL.
#c.NotebookApp.trust_xheaders = False
## Disable launching browser by redirect file
#
# For versions of notebook > 5.7.2, a security measure was added that
# prevented the authentication token used to launch the browser from being
# visible. This feature makes it difficult for other users on a multi-user
# system to run code in your Jupyter session as you.
#
# However, in some environments (like Windows Subsystem for Linux (WSL) and
# Chromebooks), launching a browser using a redirect file can lead to the
# browser failing to load. This is because of the difference in file
# structures/paths between the runtime and the browser.
#
# Setting this option to False will disable this behavior, allowing the
# browser to launch by using a URL and visible token (as before).
#c.NotebookApp.use_redirect_file = True
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## Specify where to open the notebook on startup. This is the `new` argument
# passed to the standard library method `webbrowser.open`. The behaviour is not
# guaranteed, but depends on browser support. Valid values are:
#
# - 2 opens a new tab,
# - 1 opens a new window,
# - 0 opens in an existing window.
#
# See the `webbrowser.open` documentation for details.
#c.NotebookApp.webbrowser_open_new = 2
## Set the tornado compression options for websocket connections.
#
# This value will be returned from
# :meth:`WebSocketHandler.get_compression_options`. None (default) will disable
# compression. A dict (even an empty one) will enable compression.
#
# See the tornado docs for WebSocketHandler.get_compression_options for details.
#c.NotebookApp.websocket_compression_options = None
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
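# A minimal sketch of a typical remote-access setup combining the options
# above (values are illustrative; adjust host, port, and certificate paths to
# your environment):
#
# c.NotebookApp.ip = '0.0.0.0'
# c.NotebookApp.port = 8888
# c.NotebookApp.open_browser = False
# c.NotebookApp.password_required = True
# c.NotebookApp.certfile = '/etc/jupyter/ssl/notebook.pem'
# c.NotebookApp.keyfile = '/etc/jupyter/ssl/notebook.key'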
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'username'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
## White list of allowed kernel message types. When the list is empty, all
# message types are allowed.
#c.MappingKernelManager.allowed_message_types = []
## Whether messages from kernels whose frontends have disconnected should be
# buffered in-memory.
#
# When True (default), messages are buffered and replayed on reconnect, avoiding
# lost messages due to interrupted connectivity.
#
# Disable if long-running kernels will produce too much output while no
# frontends are connected.
#c.MappingKernelManager.buffer_offline_messages = True
## Whether to consider culling kernels which are busy. Only effective if
# cull_idle_timeout > 0.
#c.MappingKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections. Only
# effective if cull_idle_timeout > 0.
#c.MappingKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be
# culled. Values of 0 or lower disable culling. Very short timeouts may result
# in kernels being culled for users with poor network connections.
#c.MappingKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
#c.MappingKernelManager.cull_interval = 300
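# A minimal sketch of an idle-culling setup using the options above together
# with NotebookApp.shutdown_no_activity_timeout (values are illustrative):
#
# c.MappingKernelManager.cull_idle_timeout = 3600   # cull kernels idle > 1h
# c.MappingKernelManager.cull_interval = 300        # check every 5 minutes
# c.MappingKernelManager.cull_busy = False          # never cull busy kernels
# c.NotebookApp.shutdown_no_activity_timeout = 7200 # stop server after 2h idle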
## Timeout for giving up on a kernel (in seconds).
#
# On starting and restarting kernels, we check whether the kernel is running and
# responsive by sending kernel_info_requests. This sets the timeout in seconds
# for how long the kernel can take before being presumed dead. This affects the
# MappingKernelManager (which handles kernel restarts) and the
# ZMQChannelsHandler (which handles the startup).
#c.MappingKernelManager.kernel_info_timeout = 60
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
## Allow access to hidden files
#c.ContentsManager.allow_hidden = False
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## handler class to use when serving raw file requests.
#
# Default is a fallback that talks to the ContentsManager API, which may be
# inefficient, especially for large files.
#
# Local files-based ContentsManagers can use a StaticFileHandler subclass, which
# will be much more efficient.
#
# Access to these files should be Authenticated.
#c.ContentsManager.files_handler_class = 'notebook.files.handlers.FilesHandler'
## Extra parameters to pass to files_handler_class.
#
# For example, StaticFileHandlers generally expect a `path` argument specifying
# the root directory from which to serve files.
#c.ContentsManager.files_handler_params = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note
# ----
# Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default, notebooks are saved on disk to a temporary file and then, if
# successfully written, that file replaces the old one. This procedure, called
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked file systems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. on a
# full filesystem or when a quota is exceeded).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## If True (default), deleting files will send them to the platform's
# trash/recycle bin, where they can be recovered. If False, deleting files
# really deletes them.
#c.FileContentsManager.delete_to_trash = True
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written
# - model: the model representing the file
# - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# GatewayKernelManager(MappingKernelManager) configuration
#------------------------------------------------------------------------------
## Kernel manager that supports remote kernels hosted by Jupyter Kernel or
# Enterprise Gateway.
#------------------------------------------------------------------------------
# GatewayKernelSpecManager(KernelSpecManager) configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# GatewayClient(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This class manages the configuration. It's its own singleton class so that we
# can share these values across all objects. It also contains some helper methods
# to build request arguments out of the various config options.
## The authorization token used in the HTTP headers. (JUPYTER_GATEWAY_AUTH_TOKEN
# env var)
#c.GatewayClient.auth_token = None
## The filename of CA certificates or None to use defaults.
# (JUPYTER_GATEWAY_CA_CERTS env var)
#c.GatewayClient.ca_certs = None
## The filename for client SSL certificate, if any. (JUPYTER_GATEWAY_CLIENT_CERT
# env var)
#c.GatewayClient.client_cert = None
## The filename for client SSL key, if any. (JUPYTER_GATEWAY_CLIENT_KEY env var)
#c.GatewayClient.client_key = None
## The time allowed for HTTP connection establishment with the Gateway server.
# (JUPYTER_GATEWAY_CONNECT_TIMEOUT env var)
#c.GatewayClient.connect_timeout = 60.0
## A comma-separated list of environment variable names that will be included,
# along with their values, in the kernel startup request. The corresponding
# `env_whitelist` configuration value must also be set on the Gateway server -
# since that configuration value indicates which environmental values to make
# available to the kernel. (JUPYTER_GATEWAY_ENV_WHITELIST env var)
#c.GatewayClient.env_whitelist = ''
## Additional HTTP headers to pass on the request. This value will be converted
# to a dict. (JUPYTER_GATEWAY_HEADERS env var)
#c.GatewayClient.headers = '{}'
## The password for HTTP authentication. (JUPYTER_GATEWAY_HTTP_PWD env var)
#c.GatewayClient.http_pwd = None
## The username for HTTP authentication. (JUPYTER_GATEWAY_HTTP_USER env var)
#c.GatewayClient.http_user = None
## The gateway API endpoint for accessing kernel resources
# (JUPYTER_GATEWAY_KERNELS_ENDPOINT env var)
#c.GatewayClient.kernels_endpoint = '/api/kernels'
## The gateway API endpoint for accessing kernelspecs
# (JUPYTER_GATEWAY_KERNELSPECS_ENDPOINT env var)
#c.GatewayClient.kernelspecs_endpoint = '/api/kernelspecs'
## The gateway endpoint for accessing kernelspecs resources
# (JUPYTER_GATEWAY_KERNELSPECS_RESOURCE_ENDPOINT env var)
#c.GatewayClient.kernelspecs_resource_endpoint = '/kernelspecs'
## The time allowed for HTTP request completion. (JUPYTER_GATEWAY_REQUEST_TIMEOUT
# env var)
#c.GatewayClient.request_timeout = 60.0
## The url of the Kernel or Enterprise Gateway server where kernel specifications
# are defined and kernel management takes place. If defined, this Notebook
# server acts as a proxy for all kernel management and kernel specification
# retrieval. (JUPYTER_GATEWAY_URL env var)
#c.GatewayClient.url = None
## For HTTPS requests, determines if server's certificate should be validated or
# not. (JUPYTER_GATEWAY_VALIDATE_CERT env var)
#c.GatewayClient.validate_cert = True
## The websocket url of the Kernel or Enterprise Gateway server. If not
# provided, this value will correspond to the value of the Gateway url with 'ws'
# in place of 'http'. (JUPYTER_GATEWAY_WS_URL env var)
#c.GatewayClient.ws_url = None
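# A minimal sketch of pointing this notebook server at a Kernel Gateway using
# the options above (the URL and token are illustrative placeholders):
#
# c.GatewayClient.url = 'http://gateway.example.com:8888'
# c.GatewayClient.auth_token = 'replace-with-your-gateway-token'
# c.GatewayClient.request_timeout = 120.0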
|
{
"content_hash": "078b94eb3ec53dff449c04c9c753c20f",
"timestamp": "",
"source": "github",
"line_count": 810,
"max_line_length": 103,
"avg_line_length": 39.8604938271605,
"alnum_prop": 0.7053612909220429,
"repo_name": "scray/scray",
"id": "f1aa09338dcfb5dcbf21ea550c070506f0727011",
"size": "34506",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "projects/persistent-traffic-data/analytics-workbench/conf/jupyter_notebook_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "627"
},
{
"name": "HTML",
"bytes": "3543"
},
{
"name": "Java",
"bytes": "250117"
},
{
"name": "Jupyter Notebook",
"bytes": "1220"
},
{
"name": "Python",
"bytes": "34506"
},
{
"name": "Scala",
"bytes": "1229538"
},
{
"name": "Shell",
"bytes": "2469"
},
{
"name": "Thrift",
"bytes": "15048"
}
],
"symlink_target": ""
}
|
"""FastPair: Data-structure for the dynamic closest-pair problem.
This data-structure is based on the observation that the conga line data
structure, in practice, does better the more subsets you give to it: even
though the worst case time for k subsets is O(nk log (n/k)), that worst case
seems much harder to reach than the nearest neighbor algorithm.
In the limit of arbitrarily many subsets, each new addition or point moved
by a deletion will be in a singleton subset, and the algorithm will
differ from nearest neighbors in only a couple of ways: (1) when we
create the initial data structure, we use a conga line rather than
all nearest neighbors, to keep the indegree of each point low, and
(2) when we insert a point, we don't bother updating other points'
neighbors.
Notes
-----
Total space: 20n bytes. (Could be reduced to 4n at some cost in update time.)
Time per insertion or single distance update: O(n)
Time per deletion or point update: O(n) expected, O(n^2) worst case
Time per closest pair: O(n)
References
----------
[1] Eppstein, David: Fast hierarchical clustering and other applications of
dynamic closest pairs. Journal of Experimental Algorithmics 5 (2000) 1.
"""
# Copyright (c) 2016, Carson J. Q. Farmer <carsonfarmer@gmail.com>
# Copyright (c) 2002-2015, David Eppstein
# Licensed under the MIT Licence (http://opensource.org/licenses/MIT).
from __future__ import print_function, division, absolute_import
from itertools import combinations, cycle
from operator import itemgetter
from collections import defaultdict
import scipy.spatial.distance as dist
__all__ = ["FastPair", "dist"]
class attrdict(dict):
"""Simple dict with support for accessing elements as attributes."""
def __init__(self, *args, **kwargs):
super(attrdict, self).__init__(*args, **kwargs)
self.__dict__ = self
class FastPair(object):
"""FastPair 'sketch' class.
"""
def __init__(self, min_points=10, dist=dist.euclidean):
"""Initialize an empty FastPair data-structure.
Parameters
----------
min_points : int, default=10
The minimum number of points to add before initializing the
            data-structure. Queries made _before_ `min_points` points have
            been added to the data-structure will use brute force.
dist : function, default=scipy.spatial.distance.euclidean
            Can be any Python function that returns a distance (float)
            between two vectors (tuples) `u` and `v`. Any distance function
from `scipy.spatial.distance` will do the trick. By default, the
Euclidean distance function is used. This function should play
nicely with the `merge` function.
"""
self.min_points = min_points
self.dist = dist
self.initialized = False # Has the data-structure been initialized?
self.neighbors = defaultdict(attrdict) # Dict of neighbor points and dists
self.points = list() # Internal point set; entries may be non-unique
def __add__(self, p):
"""Add a point and find its nearest neighbor.
There is some extra logic here to allow us to gradually build up the
FastPair data-structure until we reach `min_points`. Once `min_points`
has been reached, we initialize the data-structure and start to take
advantage of the FastPair efficiencies.
"""
self.points.append(p)
if self.initialized:
self._find_neighbor(p)
elif len(self) >= self.min_points:
self.build()
return self
def __sub__(self, p):
"""Remove a point and update neighbors."""
self.points.remove(p)
if self.initialized:
# We must update neighbors of points for which `p` had been nearest.
for q in self.points:
if self.neighbors[q].neigh == p:
res = self._find_neighbor(q)
return self
def __len__(self):
"""Number of points in data structure."""
return len(self.points)
def __call__(self):
"""Find closest pair by scanning list of nearest neighbors."""
return self.closest_pair()
def __contains__(self, p):
return p in self.points
def __iter__(self):
return iter(self.points)
def __getitem__(self, item):
        if item not in self:
raise KeyError("{} not found".format(item))
return self.neighbors[item]
def __setitem__(self, item, value):
        if item not in self:
raise KeyError("{} not found".format(item))
self._update_point(item, value)
def build(self, points=None):
"""Build a FastPairs data-structure from a set of (new) points.
Here we use a conga line rather than calling explicitly (re)building
the neighbors map multiple times as it is more efficient. This method
needs to be called _before_ querying the data-structure or adding/
removing any new points. Once it has been called, the internal
`initialized` flag will be set to True. Otherwise, simple brute-force
versions of queries and calculations will be used.
Parameters
----------
points : list of tuples/vectors, default=None
An optional list of point tuples to be added to the point set,
prior to computing the conga line and main FastPair data structure.
"""
if points is not None:
self.points += list(points)
np = len(self)
        # Go through and find all neighbors, placing them in a conga line
for i in range(np - 1):
# Find neighbor to p[0] to start
nbr = i + 1
nbd = float("inf")
for j in range(i + 1, np):
d = self.dist(self.points[i], self.points[j])
if d < nbd:
nbr = j
nbd = d
# Add that edge, move nbr to points[i+1]
self.neighbors[self.points[i]].dist = nbd
self.neighbors[self.points[i]].neigh = self.points[nbr]
self.points[nbr] = self.points[i + 1]
self.points[i + 1] = self.neighbors[self.points[i]].neigh
# No more neighbors, terminate conga line.
        # Last person on the line has no neighbors :(
self.neighbors[self.points[np - 1]].neigh = self.points[np - 1]
self.neighbors[self.points[np - 1]].dist = float("inf")
self.initialized = True
return self
def closest_pair(self):
"""Find closest pair by scanning list of nearest neighbors.
If `npoints` is less than `min_points`, a brute-force version
        of the closest pair algorithm is used.
"""
if len(self) < 2:
raise ValueError("Must have `npoints >= 2` to form a pair.")
elif not self.initialized:
return self.closest_pair_brute_force()
a = self.points[0] # Start with first point
d = self.neighbors[a].dist
for p in self.points:
if self.neighbors[p].dist < d:
a = p # Update `a` and distance `d`
d = self.neighbors[p].dist
b = self.neighbors[a].neigh
return d, (a, b)
def closest_pair_brute_force(self):
"""Find closest pair using brute-force algorithm."""
return _closest_pair_brute_force(self.points, self.dist)
def sdist(self, p):
"""Compute distances from input to all other points in data-structure.
This returns an iterator over all other points and their distance
from the input point `p`. The resulting iterator returns tuples with
        the first item giving the distance, and the second item giving the
neighbor point. The `min` of this iterator is essentially a brute-
force 'nearest-neighbor' calculation. To do this, supply `itemgetter`
(or a lambda version) as the `key` argument to `min`.
Examples
--------
>>> fp = FastPair().build(points)
>>> min(fp.sdist(point), key=itemgetter(0))
"""
return ((self.dist(a, b), b) for a, b in zip(cycle([p]), self.points) if b != a)
def _find_neighbor(self, p):
"""Find and update nearest neighbor of a given point."""
# If no neighbors available, set flag for `_update_point` to find
if len(self) < 2:
self.neighbors[p].neigh = p
self.neighbors[p].dist = float("inf")
else:
# Find first point unequal to `p` itself
first_nbr = 0
if p == self.points[first_nbr]:
first_nbr = 1
self.neighbors[p].neigh = self.points[first_nbr]
self.neighbors[p].dist = self.dist(p, self.neighbors[p].neigh)
# Now test whether each other point is closer
for q in self.points[first_nbr + 1 :]:
if p != q:
d = self.dist(p, q)
if d < self.neighbors[p].dist:
self.neighbors[p].dist = d
self.neighbors[p].neigh = q
return dict(self.neighbors[p]) # Return plain ol' dict
def _update_point(self, old, new):
"""Update point location, neighbors, and distances.
All distances to point have changed, we need to recompute all aspects
of the data structure that may be affected. Note that although we
completely recompute the neighbors of the original point (`old`), we
don't explicitly rebuild the neighbors map, since that would double the
number of distance computations made by this routine. Also, like
deletion, we don't change any _other_ point's neighbor to the updated
point.
"""
# Out with the old, in with the new...
self.points.remove(old)
self.points.append(new)
if not self.initialized:
return new
del self.neighbors[old]
self.neighbors[new].neigh = new # Flag for not yet found any
self.neighbors[new].dist = float("inf")
for q in self.points:
if q != new:
d = self.dist(new, q)
if d < self.neighbors[new].dist:
self.neighbors[new].dist = d
self.neighbors[new].neigh = q
if self.neighbors[q].neigh == old:
if d > self.neighbors[q].dist:
self._find_neighbor(q)
else:
self.neighbors[q].neigh = new
self.neighbors[q].dist = d
return dict(self.neighbors[new])
# def merge_closest(self):
# dist, (a, b) = self.closest_pair()
# c = self.merge(a, b)
# self -= b
# return self._update_point(a, c)
def _closest_pair_brute_force(pts, dst=dist.euclidean):
"""Compute closest pair of points using brute-force algorithm.
Notes
-----
Computes all possible combinations of points and compares their distances.
This is _not_ efficient, nor scalable, but it provides a useful reference
for more efficient algorithms. This version is actually pretty fast due
to its use of fast Python builtins.
"""
return min((dst(p1, p2), (p1, p2)) for p1, p2 in combinations(pts, r=2))
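# A minimal usage sketch of the FastPair class defined above (the random 2-D
# points are illustrative; `itemgetter` is already imported at module level):
if __name__ == "__main__":
    import random
    fp = FastPair(min_points=5)
    pts = [(random.random(), random.random()) for _ in range(25)]
    for p in pts:
        fp += p  # __add__: append the point and (once built) find its neighbor
    d, (a, b) = fp.closest_pair()  # scan the nearest-neighbor list
    print("closest pair at distance %.4f:" % d, a, b)
    fp -= a  # __sub__: remove a point and repair affected neighbors
    # Brute-force nearest neighbor of a single point, via sdist:
    print("nearest to b:", min(fp.sdist(b), key=itemgetter(0)))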
|
{
"content_hash": "e7242b531d5baedffecf591b826f2d4c",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 88,
"avg_line_length": 40.44285714285714,
"alnum_prop": 0.6074708583539385,
"repo_name": "carsonfarmer/fastpair",
"id": "9f7332106f246ec2489d3da6a5d1fadc135616bc",
"size": "11371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fastpair/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21140"
}
],
"symlink_target": ""
}
|
import requests
from starlette.requests import Request
from typing import Dict
from transformers import pipeline
from ray import serve
# 1: Wrap the pretrained sentiment analysis model in a Serve deployment.
@serve.deployment(route_prefix="/")
class SentimentAnalysisDeployment:
def __init__(self):
self._model = pipeline("sentiment-analysis")
def __call__(self, request: Request) -> Dict:
return self._model(request.query_params["text"])[0]
# 2: Deploy the deployment.
serve.run(SentimentAnalysisDeployment.bind())
# 3: Query the deployment and print the result.
print(
requests.get(
"http://localhost:8000/", params={"text": "Ray Serve is great!"}
).json()
)
# {'label': 'POSITIVE', 'score': 0.9998476505279541}
|
{
"content_hash": "1ac2099e0aaecd5c16e03ddd9083bcaf",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 72,
"avg_line_length": 26.275862068965516,
"alnum_prop": 0.7034120734908137,
"repo_name": "ray-project/ray",
"id": "c5bc266c9265ac9d9f0f83575ab5b54d01a9070d",
"size": "762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/serve/doc_code/transformers_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
}
|
"""Compute-related Utilities and helpers."""
import re
import string
import traceback
from oslo.config import cfg
from nova import block_device
from nova.compute import flavors
from nova import exception
from nova.network import model as network_model
from nova import notifications
from nova.openstack.common import log
from nova.openstack.common.notifier import api as notifier_api
from nova.openstack.common import timeutils
from nova import utils
from nova.virt import driver
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
LOG = log.getLogger(__name__)
def add_instance_fault_from_exc(context, conductor,
instance, fault, exc_info=None):
"""Adds the specified fault to the database."""
code = 500
message = fault.__class__.__name__
if hasattr(fault, "kwargs"):
code = fault.kwargs.get('code', 500)
# get the message from the exception that was thrown
# if that does not exist, use the name of the exception class itself
message = fault.kwargs.get('value', message)
details = unicode(fault)
if exc_info and code == 500:
tb = exc_info[2]
details += '\n' + ''.join(traceback.format_tb(tb))
values = {
'instance_uuid': instance['uuid'],
'code': code,
'message': unicode(message),
'details': unicode(details),
'host': CONF.host
}
conductor.instance_fault_create(context, values)
def pack_action_start(context, instance_uuid, action_name):
values = {'action': action_name,
'instance_uuid': instance_uuid,
'request_id': context.request_id,
'user_id': context.user_id,
'project_id': context.project_id,
'start_time': context.timestamp}
return values
def pack_action_finish(context, instance_uuid):
values = {'instance_uuid': instance_uuid,
'request_id': context.request_id,
'finish_time': timeutils.utcnow()}
return values
def pack_action_event_start(context, instance_uuid, event_name):
values = {'event': event_name,
'instance_uuid': instance_uuid,
'request_id': context.request_id,
'start_time': timeutils.utcnow()}
return values
def pack_action_event_finish(context, instance_uuid, event_name, exc_val=None,
exc_tb=None):
values = {'event': event_name,
'instance_uuid': instance_uuid,
'request_id': context.request_id,
'finish_time': timeutils.utcnow()}
if exc_tb is None:
values['result'] = 'Success'
else:
values['result'] = 'Error'
values['message'] = str(exc_val)
values['traceback'] = ''.join(traceback.format_tb(exc_tb))
return values
def get_device_name_for_instance(context, instance, bdms, device):
"""Validates (or generates) a device name for instance.
If device is not set, it will generate a unique device appropriate
for the instance. It uses the block device mapping table to find
valid device names. If the device name is valid but applicable to
a different backend (for example /dev/vdc is specified but the
backend uses /dev/xvdc), the device name will be converted to the
appropriate format.
"""
req_prefix = None
req_letter = None
if device:
try:
req_prefix, req_letter = block_device.match_device(device)
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=device)
mappings = block_device.instance_block_mapping(instance, bdms)
try:
prefix = block_device.match_device(mappings['root'])[0]
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=mappings['root'])
# NOTE(vish): remove this when xenapi is setting default_root_device
if driver.compute_driver_matches('xenapi.XenAPIDriver'):
prefix = '/dev/xvd'
if req_prefix != prefix:
LOG.debug(_("Using %(prefix)s instead of %(req_prefix)s"),
{'prefix': prefix, 'req_prefix': req_prefix})
used_letters = set()
for device_path in mappings.itervalues():
letter = block_device.strip_prefix(device_path)
# NOTE(vish): delete numbers in case we have something like
# /dev/sda1
letter = re.sub("\d+", "", letter)
used_letters.add(letter)
# NOTE(vish): remove this when xenapi is properly setting
# default_ephemeral_device and default_swap_device
if driver.compute_driver_matches('xenapi.XenAPIDriver'):
instance_type = flavors.extract_flavor(instance)
if instance_type['ephemeral_gb']:
used_letters.add('b')
if instance_type['swap']:
used_letters.add('c')
if not req_letter:
req_letter = _get_unused_letter(used_letters)
if req_letter in used_letters:
raise exception.DevicePathInUse(path=device)
device_name = prefix + req_letter
return device_name
def _get_unused_letter(used_letters):
doubles = [first + second for second in string.ascii_lowercase
for first in string.ascii_lowercase]
all_letters = set(list(string.ascii_lowercase) + doubles)
letters = list(all_letters - used_letters)
# NOTE(vish): prepend ` so all shorter sequences sort first
letters.sort(key=lambda x: x.rjust(2, '`'))
return letters[0]
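# For example (a sketch of the selection order implied above): with
# used_letters = {'a', 'b', 'c'}, the sort key right-pads single letters with
# '`' so they come before doubles, and _get_unused_letter returns 'd'; once
# 'a'..'z' are all used, the first double letter 'aa' is returned.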
def notify_usage_exists(context, instance_ref, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
"""Generates 'exists' notification for an instance for usage auditing
purposes.
:param current_period: if True, this will generate a usage for the
current usage period; if False, this will generate a usage for the
previous audit period.
:param ignore_missing_network_data: if True, log any exceptions generated
while getting network info; if False, raise the exception.
:param system_metadata: system_metadata DB entries for the instance,
if not None. *NOTE*: Currently unused here in trunk, but needed for
potential custom modifications.
:param extra_usage_info: Dictionary containing extra values to add or
override in the notification if not None.
"""
audit_start, audit_end = notifications.audit_period_bounds(current_period)
bw = notifications.bandwidth_usage(instance_ref, audit_start,
ignore_missing_network_data)
if system_metadata is None:
system_metadata = utils.instance_sys_meta(instance_ref)
# add image metadata to the notification:
image_meta = notifications.image_meta(system_metadata)
extra_info = dict(audit_period_beginning=str(audit_start),
audit_period_ending=str(audit_end),
bandwidth=bw, image_meta=image_meta)
if extra_usage_info:
extra_info.update(extra_usage_info)
notify_about_instance_usage(context, instance_ref, 'exists',
system_metadata=system_metadata, extra_usage_info=extra_info)
def notify_about_instance_usage(context, instance, event_suffix,
network_info=None, system_metadata=None,
extra_usage_info=None, host=None):
"""
Send a notification about an instance.
:param event_suffix: Event type like "delete.start" or "exists"
:param network_info: Networking information, if provided.
:param system_metadata: system_metadata DB entries for the instance,
if provided.
:param extra_usage_info: Dictionary containing extra values to add or
override in the notification.
:param host: Compute host for the instance, if specified. Default is
CONF.host
"""
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = notifications.info_from_instance(context, instance,
network_info, system_metadata, **extra_usage_info)
if event_suffix.endswith("error"):
level = notifier_api.ERROR
else:
level = notifier_api.INFO
notifier_api.notify(context, 'compute.%s' % host,
'compute.instance.%s' % event_suffix, level,
usage_info)
def notify_about_aggregate_update(context, event_suffix, aggregate_payload):
"""
Send a notification about aggregate update.
:param event_suffix: Event type like "create.start" or "create.end"
:param aggregate_payload: payload for aggregate update
"""
aggregate_identifier = aggregate_payload.get('aggregate_id', None)
if not aggregate_identifier:
aggregate_identifier = aggregate_payload.get('name', None)
if not aggregate_identifier:
LOG.debug(_("No aggregate id or name specified for this "
"notification and it will be ignored"))
return
notifier_api.notify(context, 'aggregate.%s' % aggregate_identifier,
'aggregate.%s' % event_suffix, notifier_api.INFO,
aggregate_payload)
def get_nw_info_for_instance(instance):
info_cache = instance['info_cache'] or {}
cached_nwinfo = info_cache.get('network_info') or []
return network_model.NetworkInfo.hydrate(cached_nwinfo)
def has_audit_been_run(context, conductor, host, timestamp=None):
begin, end = utils.last_completed_audit_period(before=timestamp)
task_log = conductor.task_log_get(context, "instance_usage_audit",
begin, end, host)
if task_log:
return True
else:
return False
def start_instance_usage_audit(context, conductor, begin, end, host,
num_instances):
conductor.task_log_begin_task(context, "instance_usage_audit", begin,
end, host, num_instances,
"Instance usage audit started...")
def finish_instance_usage_audit(context, conductor, begin, end, host, errors,
message):
conductor.task_log_end_task(context, "instance_usage_audit", begin, end,
host, errors, message)
def usage_volume_info(vol_usage):
def null_safe_str(s):
return str(s) if s else ''
tot_refreshed = vol_usage['tot_last_refreshed']
curr_refreshed = vol_usage['curr_last_refreshed']
if tot_refreshed and curr_refreshed:
last_refreshed_time = max(tot_refreshed, curr_refreshed)
elif tot_refreshed:
last_refreshed_time = tot_refreshed
else:
# curr_refreshed must be set
last_refreshed_time = curr_refreshed
usage_info = dict(
volume_id=vol_usage['volume_id'],
tenant_id=vol_usage['project_id'],
user_id=vol_usage['user_id'],
availability_zone=vol_usage['availability_zone'],
instance_id=vol_usage['instance_uuid'],
last_refreshed=null_safe_str(last_refreshed_time),
reads=vol_usage['tot_reads'] + vol_usage['curr_reads'],
read_bytes=vol_usage['tot_read_bytes'] +
vol_usage['curr_read_bytes'],
writes=vol_usage['tot_writes'] + vol_usage['curr_writes'],
write_bytes=vol_usage['tot_write_bytes'] +
vol_usage['curr_write_bytes'])
return usage_info
class EventReporter(object):
"""Context manager to report instance action events."""
def __init__(self, context, conductor, event_name, *instance_uuids):
self.context = context
self.conductor = conductor
self.event_name = event_name
self.instance_uuids = instance_uuids
def __enter__(self):
for uuid in self.instance_uuids:
event = pack_action_event_start(self.context, uuid,
self.event_name)
self.conductor.action_event_start(self.context, event)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for uuid in self.instance_uuids:
event = pack_action_event_finish(self.context, uuid,
self.event_name, exc_val, exc_tb)
self.conductor.action_event_finish(self.context, event)
return False
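# A minimal usage sketch of the EventReporter context manager above
# (hypothetical `ctxt`, `conductor_api`, `instance`, and `reboot` names):
#
#     with EventReporter(ctxt, conductor_api, 'compute_reboot_instance',
#                        instance['uuid']):
#         reboot(instance)  # start/finish action events are recorded around this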
|
{
"content_hash": "5584fea23cbaced2a62484f52ba736b7",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 78,
"avg_line_length": 36.098837209302324,
"alnum_prop": 0.6301336769205992,
"repo_name": "Brocade-OpenSource/OpenStack-DNRM-Nova",
"id": "d40d4cab01ab0c1e561d809bc4c05be5ac210832",
"size": "13080",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/compute/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11515074"
},
{
"name": "Shell",
"bytes": "17148"
}
],
"symlink_target": ""
}
|
"""Lingvo layers that used for spectrum augmentation."""
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import py_utils
_SPECAUGMENT_ARGS = (
'freq_mask_max_bins',
'freq_mask_count',
'use_dynamic_time_mask_max_frames',
'time_mask_max_frames',
'time_mask_count',
'time_mask_max_ratio',
'time_masks_per_frame',
'block_mask_prob',
'block_mask_size',
'freq_warp_max_bins',
'time_warp_bound',
'time_warp_max_frames',
'time_warp_max_ratio',
'freq_noise_max_stddev',
)
def _random_uniform_op(use_stateless_op):
return tf.random.stateless_uniform if use_stateless_op else tf.random.uniform
def _random_normal_op(use_stateless_op):
return tf.random.stateless_normal if use_stateless_op else tf.random.normal
def _global_seed_from_inputs(input_floats):
"""Generates a random seed tensor based on input floats and mode key.
Args:
input_floats: a set of float input tensors that are derived from the input
data (for example, input tokens). The important thing is that these are
usually different for each batch.
Returns:
A tensor of shape=[2] with integer seed tensors derived from the inputs.
"""
timestamp = tf.math.floormod(
tf.cast(1000.0 * tf.timestamp(), dtype=tf.int64), 10000000000)
input_sum = tf.cast(tf.reduce_sum(tf.math.abs(input_floats)), dtype=tf.int64)
return tf.stack([timestamp + input_sum, timestamp - input_sum], axis=-1)
def _hat(x):
"""Hat function.
The hat function is a piecewise linear function defined such that
1) x < -1: _hat(x) = 0
2) -1 <= x < 0: _hat(x) = x + 1
3) 0 <= x < 1: _hat(x) = -x + 1
4) x > 1 : _hat(x) = 0
Args:
x: A tensor.
Returns:
Tensor obtained by element-wise application of the hat function.
"""
return tf.nn.relu(x + 1) - 2 * tf.nn.relu(x) + tf.nn.relu(x - 1)
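# For example (values follow the piecewise definition above):
#   _hat(tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])) evaluates to
#   [0.0, 0.5, 1.0, 0.5, 0.0].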
class SpectrumAugmenter(base_layer.BaseLayer):
"""Performs data augmentation as according to the SpecAug paper.
https://arxiv.org/pdf/1904.08779.pdf
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('freq_mask_max_bins', 15,
'Maximum number of frequency bins of frequency masking.')
p.Define('freq_mask_count', 1,
'Number of times we apply masking on the frequency axis.')
# TODO(danielspark): Deprecate 'use_dynamic_time_mask_max_frames' and
# introduce enum parameter to replace it.
p.Define(
'use_dynamic_time_mask_max_frames', False,
'If true, time_mask_max_frames is determined by '
'time_mask_max_ratio * utterance_length.')
p.Define(
'time_mask_max_frames', 50, 'Maximum number of frames of time masking. '
'Overridden when use_dynamic_time_mask_max_frames = True.')
p.Define(
'time_mask_count', 1,
'Number of times we apply masking on the time axis. '
'Acts as upper-bound when time_masks_per_frame > 0.')
p.Define('time_mask_max_ratio', 1.0,
'Maximum portion allowed for time masking.')
p.Define(
'time_masks_per_frame', 0.0,
'Ratio of number of time masks to be applied against the number '
'of frames. If > 0, multiplicity of the time mask is determined by '
'min(time_masks_per_frame * utterance_length, time_mask_count).')
p.Define('block_mask_prob', 0.0, 'Mask probability for block mask.')
p.Define('block_mask_size', dict(t=32, f=32), 'Block size for block mask.')
p.Define('freq_warp_max_bins', 0,
'Maximum number of frequency bins for shifting in freq warping.')
p.Define(
'time_warp_bound', 'static',
'To be set to either `dynamic` or `static`. '
'If `dynamic`, time warp bound is determined by '
'time_warp_max_ratio * utterance_length. '
'If `static`, time warp bound is determined by '
'min(time_warp_max_frames, time_warp_max_ratio * utterance_length).')
p.Define('time_warp_max_frames', 0,
'Maximum number of frames for shifting in time warping.')
p.Define('time_warp_max_ratio', 0.0,
'Maximum portion of frames for shifting in time warping.')
p.Define('use_noise', False, 'Whether to noisify the time masked region.')
p.Define('gaussian_noise', False, 'Use Gaussian distribution for noise.')
p.Define(
'freq_noise_max_stddev', 0.0,
'Maximum stddev of frequency noise. stddev is sampled by uniform '
'probability in range(0, freq_noise_max_stddev)')
p.Define(
'freq_noise_warmup_steps', 0, 'Number of warm-up steps, in which the '
'freq_noise stddev increases linearly.')
p.Define('unstack', False,
'Whether to unstack features before applying SpecAugment.')
p.Define(
'stack_height', 3,
'Number of frames stacked on top of each other. Only used when '
'`unstack` is true.')
p.Define(
'domain_ids', [0],
        'If domain ids are given, these parameters describe which domains '
'will be augmented, e.g. '
'p.domain_ids = [2, 7, 1] '
'p.time_mask_count = [1, 2, 0] '
        'implies domain 2 will have 1, domain 7 will have 2, and domain 1 '
        'will have 0 time masks. All other domains will not be augmented.')
p.Define(
'use_input_dependent_random_seed', False,
        'Whether to use stateless random TensorFlow ops, with seeds '
        'determined by the input features. This feature is necessary for '
        'applications including federated learning.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
num_domains = len(p.domain_ids)
for field in _SPECAUGMENT_ARGS:
v = getattr(p, field)
if isinstance(v, (list, tuple)):
assert len(v) == num_domains, (
'Length: %d of field: %s does not match total domains: %d' %
(len(v), field, num_domains))
else:
setattr(p, field, [v] * num_domains)
assert p.freq_mask_max_bins[0] > -1
assert p.time_mask_max_frames[0] > -1
assert p.freq_warp_max_bins[0] > -1
assert p.time_warp_max_frames[0] > -1
assert p.freq_noise_max_stddev[0] >= 0.0
def EinsumBBmBm(self, a, b, name=None):
return tf.einsum('b,bm->bm', a, b, name=name)
def EinsumBmtBmBt(self, a, b, name=None):
return tf.einsum('bmt,bm->bt', a, b, name=name)
def EinsumBxycByBxyc(self, a, b, name=None):
return tf.einsum('bxyc,by->bxyc', a, b, name=name)
def EinsumBxycBxBxyc(self, a, b, name=None):
return tf.einsum('bxyc,bx->bxyc', a, b, name=name)
def EinsumBxyBxBxy(self, a, b, name=None):
return tf.einsum('bxy,bx->bxy', a, b, name=name)
def EinsumBxycBzxBzyc(self, a, b, name=None):
return tf.einsum('bxyc,bzx->bzyc', a, b, name=name)
def EinsumBxycBzyBxzc(self, a, b, name=None):
return tf.einsum('bxyc,bzy->bxzc', a, b, name=name)
@property
def augment_weight(self):
# TODO(b/180135215): apply it to other augmentations.
p = self.params
if p.freq_noise_warmup_steps == 0:
return tf.cast(tf.constant(1.), p.dtype)
global_step = tf.cast(py_utils.GetGlobalStep(), p.dtype)
augment_warmup_steps = tf.cast(p.freq_noise_warmup_steps, p.dtype)
return tf.minimum(global_step, augment_warmup_steps) / augment_warmup_steps
def _GetMask(self,
batch_size,
choose_range,
mask_size,
global_seed,
max_length=None,
masks_per_frame=0.0,
multiplicity=1,
dtype=tf.float32,
max_ratio=1.0):
"""Returns fixed size multi-masks starting from random positions.
A multi-mask is a mask obtained by applying multiple masks.
When max_length is given, this function:
1) Sample random mask lengths less than max_length with shape
(batch_size, multiplicity).
2) Truncate lengths to a max of (choose_range * max_ratio),
so that each mask is fully contained within the corresponding sequence.
3) Randomly sample start points of shape (batch_size, multiplicity)
within (choose_range - lengths).
4) For each batch, multiple masks (whose number is given by the
multiplicity) are constructed.
5) Return a mask of shape (batch_size, mask_size) where masks are
obtained by composing the masks constructed in step 4).
If masks_per_frame > 0, the number is given by
min(masks_per_frame * choose_range, multiplicity).
If not, all the masks are composed. The masked regions are set to zero.
When max_length is not given, this function:
1) Sample random mask lengths less than (choose_range * max_ratio)
with shape (batch_size, multiplicity).
2) Proceed to steps 3), 4) and 5) of the above.
Args:
batch_size: Batch size. Integer number.
choose_range: Range within which the masked entries must lie. Tensor of
shape (batch_size,).
mask_size: Size of the mask. Integer number.
global_seed: an integer seed tensor for stateless random ops.
max_length: Maximum number of allowed consecutive masked entries. Integer
number or None.
masks_per_frame: Number of masks per frame. Float number. If > 0, the
multiplicity of the mask is set to be masks_per_frame * choose_range.
multiplicity: Maximum number of total masks. Integer number.
dtype: Data type.
max_ratio: Maximum portion of the entire range allowed to be masked. Float
number.
Returns:
mask: a fixed size multi-mask starting from a random position with shape
(batch_size, mask_size).
"""
p = self.params
# Non-empty random seed values are only used for testing or when using
# stateless random ops. seed_1 and seed_2 are set separately to avoid
# correlation of mask size and mask position.
if p.use_input_dependent_random_seed:
seed_1 = global_seed + 1
seed_2 = global_seed + 2
elif p.random_seed:
seed_1 = p.random_seed + 1
seed_2 = 2 * p.random_seed
else:
seed_1 = p.random_seed
seed_2 = p.random_seed
# Sample lengths for multiple masks.
if max_length and max_length > 0:
max_length = tf.broadcast_to(tf.cast(max_length, dtype), (batch_size,))
else:
max_length = tf.cast(choose_range, dtype=dtype) * max_ratio
random_uniform = _random_uniform_op(p.use_input_dependent_random_seed)
masked_portion = random_uniform(
shape=(batch_size, multiplicity),
minval=0.0,
maxval=1.0,
dtype=dtype,
seed=seed_1)
masked_frame_size = self.EinsumBBmBm(max_length, masked_portion)
masked_frame_size = tf.cast(masked_frame_size, dtype=tf.int32)
# Make sure the sampled length is smaller than max_ratio * length_bound.
# Note that sampling in this way is biased
# (shorter sequences may be over-masked).
choose_range = tf.expand_dims(choose_range, -1)
choose_range = tf.tile(choose_range, [1, multiplicity])
length_bound = tf.cast(choose_range, dtype=dtype)
length_bound = tf.cast(max_ratio * length_bound, dtype=tf.int32)
length = tf.minimum(masked_frame_size, tf.maximum(length_bound, 1))
# Choose starting point.
random_start = random_uniform(
shape=(batch_size, multiplicity), maxval=1.0, seed=seed_2)
start_with_in_valid_range = random_start * tf.cast(
(choose_range - length + 1), dtype=dtype)
start = tf.cast(start_with_in_valid_range, tf.int32)
end = start + length - 1
# Shift starting and end point by small value.
delta = tf.constant(0.1)
start = tf.expand_dims(tf.cast(start, dtype) - delta, -1)
start = tf.tile(start, [1, 1, mask_size])
end = tf.expand_dims(tf.cast(end, dtype) + delta, -1)
end = tf.tile(end, [1, 1, mask_size])
# Construct pre-mask of shape (batch_size, multiplicity, mask_size).
diagonal = tf.expand_dims(
tf.expand_dims(tf.cast(tf.range(mask_size), dtype=dtype), 0), 0)
diagonal = tf.tile(diagonal, [batch_size, multiplicity, 1])
pre_mask = tf.cast(
tf.math.logical_and(diagonal < end, diagonal > start), dtype=dtype)
# Sum masks with appropriate multiplicity.
if masks_per_frame > 0:
multiplicity_weights = tf.tile(
tf.expand_dims(
tf.cast(
tf.range(
tf.cast(multiplicity, dtype=tf.int32), dtype=tf.int32),
dtype=dtype), 0), [batch_size, 1])
multiplicity_tensor = masks_per_frame * tf.cast(choose_range, dtype=dtype)
multiplicity_weights = tf.cast(
multiplicity_weights < multiplicity_tensor, dtype=dtype)
pre_mask = self.EinsumBmtBmBt(pre_mask, multiplicity_weights)
else:
pre_mask = tf.reduce_sum(pre_mask, 1)
mask = tf.cast(1.0 - tf.cast(pre_mask > 0, dtype=dtype), dtype=dtype)
if p.fprop_dtype is not None and p.fprop_dtype != p.dtype:
mask = tf.cast(mask, p.fprop_dtype)
return mask
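# Illustrative sketch (assumed values, not part of the original module): for a
# hypothetical call such as
#   self._GetMask(batch_size=2, choose_range=tf.constant([10, 6]),
#                 mask_size=12, global_seed=None, max_length=4)
# each row of the returned (2, 12) tensor is 1.0 except for one contiguous run
# of zeros of length at most 4 that starts within the first 10 (resp. 6)
# positions; larger multiplicity composes several such runs per row.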
def _GetWarpMatrix(self,
batch_size,
choose_range,
matrix_size,
global_seed,
max_warp_frames=None,
dtype=tf.float32,
max_ratio=1.0):
"""Returns warp matrices starting from random positions.
When max_warp_frames != None, this function:
1) Sample random warp displacements from the interval
[-max_warp_frames, max_warp_frames) to yield shift tensor
with shape (batch_size,).
2) Truncate lengths to a maximum magnitude of (choose_range * max_ratio),
so that each shift is fully contained within the
corresponding sequence.
3) Randomly sample origin points of shape (batch_size,)
within [shift, choose_range - shift).
4) Return a batch of 1-D linear maps that fix the boundary points and
shift the origin point by the shift.
When max_warp_frames == None:
1) Sample random warp displacements with magnitudes less than
(choose_range * max_ratio) to yield shift tensor with
shape (batch_size,).
2) Proceed through steps 3), 4).
Args:
batch_size: Batch size. Integer number.
choose_range: Range within which the warp reference points must lie.
Tensor of shape (batch_size,).
matrix_size: Dimension of the vector space the warp matrix is applied to.
Integer number.
global_seed: an integer seed tensor for stateless random ops.
max_warp_frames: Upper-bound on the warp distance. Integer or None.
dtype: Data type.
max_ratio: Maximum ratio between the shift distance and choose_range.
Float number.
Returns:
warp_matrix: An array of fixed size warp matrices with shape
(batch_size, matrix_size, matrix_size).
"""
p = self.params
# Non-empty random seed values are only used for testing or when using
# stateless random ops. seed_3, seed_4, and seed_5 are set separately to
# avoid correlation of warp magnitude and origin position.
if p.use_input_dependent_random_seed:
seed_3 = global_seed + 3
seed_4 = global_seed + 4
seed_5 = global_seed + 5
elif p.random_seed:
seed_3 = p.random_seed - 1
seed_4 = p.random_seed - 1
seed_5 = 2 * p.random_seed + 1
else:
seed_3 = p.random_seed
seed_4 = p.random_seed
seed_5 = p.random_seed
choose_range_dtype = tf.cast(choose_range, dtype=dtype)
length_upper_bound = tf.cast(max_ratio * choose_range_dtype, dtype=tf.int32)
# Set shift length.
random_uniform = _random_uniform_op(p.use_input_dependent_random_seed)
if max_warp_frames and max_warp_frames > 0:
shift = random_uniform(
shape=(batch_size,),
minval=-1 * max_warp_frames,
maxval=max_warp_frames + 1,
dtype=tf.int32,
seed=seed_3)
else:
random_ratio = random_uniform(
shape=(batch_size,),
minval=-1.0,
maxval=1.0,
dtype=dtype,
seed=seed_4)
shift = tf.cast(random_ratio * tf.cast(length_upper_bound, dtype=dtype),
tf.int32)
# Make sure the sampled length is smaller than max_ratio * length_bound.
# Note that sampling in this way is biased.
# (Shorter sequences may be over-masked.)
final_shift = tf.maximum(-length_upper_bound,
tf.minimum(shift, length_upper_bound))
# Choose origin anchor point.
mid_range = tf.cast(choose_range, dtype=tf.int32)
mid_range = tf.maximum(mid_range - 2, 0)
random_origin = random_uniform(shape=(batch_size,), maxval=1.0, seed=seed_5)
origin_with_in_valid_range = random_origin * tf.cast(mid_range, dtype=dtype)
origin = tf.cast(origin_with_in_valid_range, tf.int32) + 1
# Set destination point of the origin anchor point under the warp map.
destination = origin + final_shift
# Cast origin and destination.
origin = tf.cast(origin, dtype=dtype)
destination = tf.cast(destination, dtype=dtype)
return self._ConstructWarpMatrix(
batch_size=batch_size,
matrix_size=matrix_size,
origin=origin,
destination=destination,
choose_range=choose_range_dtype,
dtype=dtype)
def _ConstructWarpMatrix(self, batch_size, matrix_size, origin, destination,
choose_range, dtype):
"""Returns warp matrices according to origin, destination and choose_range.
This function constructs a batch of warp matrices which maps the batch
of origin points to the batch of destination points with fixed boundary
coordinates at 0 and choose_range.
The warping function, defined by the origin anchor point `origin`,
the destination of the origin anchor point `destination` and the
length of the domain in the warping axis `choose_range` is a piecewise
linear map that fixes the points 0 and `choose_range` and maps
`origin` to `destination`.
For the warping matrix to be non-singular, destination must lie in the
range 1 <= destination <= choose_range - 1, so a destination
out of this range is adjusted to be in this range before the warping
matrix is constructed.
The warping map can be explicitly written by first defining the slopes:
1) slope_0 = origin / destination.
2) slope_1 = (choose_range - origin) / (choose_range - destination).
3) slope_2 = 1.0.
Then the origin point orig_i of the mapped coordinate i is given by:
1) i < destination: orig_i = slope_0 * i.
2) destination <= i < choose_range:
orig_i = slope_1 * i - (slope_1 - slope_0) * destination.
3) i >= choose_range: orig_i = i.
Denoting n_i = ceil(orig_i), the warp matrix element warp[i][j] is given by:
1) j = n_i: 1 - n_i + orig_i.
2) j = n_i - 1: n_i - orig_i.
3) Otherwise: 0.
Applying the warp matrix to an array of pixels, i.e.,
warped_pixel[i] = sum_j warp[i][j] * pixel[j], one would get
warped_pixel[i] = (n_i-orig_i) pixel[n_i-1] + (1-n_i+orig_i) pixel[n_i].
Args:
batch_size: Batch size. Integer number.
matrix_size: Dimension of the vector space the warp matrix is applied to.
Integer number.
origin: Origin anchor point for warping. Tensor of shape (batch_size,) and
data type dtype.
destination: Destination of the origin anchor point upon warping. Tensor
of shape (batch_size,) and data type dtype.
choose_range: Range within which the warp reference points must lie.
Tensor of shape (batch_size,) and data type dtype.
dtype: Data type of origin, destination, choose_range and the output warp
matrix.
Returns:
warp_matrix: An array of fixed size warp matrices with shape
(batch_size, matrix_size, matrix_size).
"""
p = self.params
# Entries of destination must be in the range
# 1 <= destination <= choose_range - 1
# for warp matrix to have non-singular values.
destination = tf.minimum(tf.maximum(destination, 1.0), choose_range - 1.0)
# Construct piece-wise linear function fixing boundary points
# specified by zero, choose_range and matrix size and maps
# the origin anchor point to the destination.
destination_bc = tf.broadcast_to(destination, (matrix_size, batch_size))
destination_bc = tf.transpose(destination_bc)
choose_range_bc = tf.broadcast_to(choose_range, (matrix_size, batch_size))
choose_range_bc = tf.transpose(choose_range_bc)
# Slopes of piece-wise linear function.
slope_0 = origin / destination
slope_1 = (choose_range - origin) / (choose_range - destination)
slope_2 = 1.0
# x is a batch of origin matrices.
# The origin matrix is the matrix such that
# origin[i][j] = Origin coordinate of coordinate i for the warp map.
# Denoting the destination of the origin anchor point in the
# warp map as "dest," the origin coordinate of point i is given by:
# 1) i < dest: slope_0 * i.
# 2) dest <= i < choose_range: slope_1 * i - (slope_1 - slope_0) * dest.
# 3) i >= choose_range: i.
x = tf.broadcast_to(
tf.cast(tf.range(matrix_size), dtype=dtype), (batch_size, matrix_size))
x = (
self.EinsumBBmBm(slope_0, x) +
self.EinsumBBmBm(slope_1 - slope_0, tf.nn.relu(x - destination_bc)) +
self.EinsumBBmBm(slope_2 - slope_1, tf.nn.relu(x - choose_range_bc)))
x = tf.broadcast_to(x, (matrix_size, batch_size, matrix_size))
x = tf.transpose(x, perm=[1, 2, 0])
# y is a batch of coordinate matrices.
# A coordinate matrix is a matrix such that
# coordinate[i][j] = j.
y = tf.broadcast_to(
tf.cast(tf.range(matrix_size), dtype=dtype),
(batch_size, matrix_size, matrix_size))
# Warp matrix is obtained by applying hat function element-wise to (x-y).
# Denoting the origin point of i under the warp map as orig_i,
# and n_i = ceil(orig_i), the warp matrix element warp[i][j] is given by:
# 1) j = n_i: 1 - n_i + orig_i.
# 2) j = n_i - 1: n_i - orig_i.
# 3) Otherwise: 0.
# Applying the warp matrix to pixels, i.e.,
# warped_pixel[i] = sum_j warp[i][j] * original_pixel[j], one would get
# warped_pixel[i] = (n_i - orig_i) * original_pixel[n_i-1]
# + (1 - n_i + orig_i) * original_pixel[n_i].
warp_matrix = x - y
warp_matrix = _hat(warp_matrix)
if p.fprop_dtype is not None and p.fprop_dtype != dtype:
warp_matrix = tf.cast(warp_matrix, p.fprop_dtype)
return warp_matrix
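# Worked example (illustrative only, values assumed): with choose_range = 8,
# origin = 4 and destination = 6, the slopes are slope_0 = 4/6 and
# slope_1 = (8 - 4) / (8 - 6) = 2. Coordinate i = 3 (< destination) maps back
# to orig_3 = slope_0 * 3 = 2.0, so row 3 of the warp matrix puts weight 1.0
# on column 2; coordinate i = 7 maps back to 2 * 7 - (2 - 4/6) * 6 = 6.0,
# i.e. warped_pixel[7] = pixel[6].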
def _FrequencyMask(self,
inputs,
global_seed,
dtype=tf.float32,
domain_id_index=0):
"""Applies frequency masking with given degree to inputs.
Args:
inputs: Batch of input features of shape (batch_size, time_length,
num_freq, channels).
global_seed: an integer seed tensor for stateless random ops.
dtype: Data type.
domain_id_index: domain id index.
Returns:
Inputs with random frequency masking applied.
"""
p = self.params
# Mask parameters.
freq_mask_max_bins = p.freq_mask_max_bins[domain_id_index]
multiplicity = p.freq_mask_count[domain_id_index]
# If masking length or count is zero, do nothing.
if freq_mask_max_bins == 0 or multiplicity == 0:
return inputs
# Arguments to pass to mask generator.
batch_size, _, num_freq, _ = py_utils.GetShape(inputs)
choose_range = tf.cast(
tf.broadcast_to(num_freq, (batch_size,)), dtype=tf.int32)
# Create masks in frequency direction and apply.
block_arrays = self._GetMask(
tf.shape(inputs)[0],
choose_range=choose_range,
mask_size=num_freq,
global_seed=global_seed,
max_length=freq_mask_max_bins,
masks_per_frame=0.0,
multiplicity=multiplicity,
dtype=dtype,
max_ratio=1.0)
return self.EinsumBxycByBxyc(inputs, block_arrays)
def _TimeMask(self,
inputs,
seq_lengths,
global_seed,
noisify=False,
gaussian_noise=False,
dtype=tf.float32,
domain_id_index=0):
"""Applies time masking with given degree to inputs.
Args:
inputs: Batch of input features of shape (batch_size, time_length,
num_freq, channels).
seq_lengths: The actual sequence lengths, of shape (batch_size,), over
which the masks are sampled.
global_seed: an integer seed tensor for stateless random ops.
noisify: Whether to noisify the masked out regions.
gaussian_noise: Whether to use gaussian noise when noisifying.
dtype: Data type.
domain_id_index: domain id index.
Returns:
Inputs with random time masking applied.
"""
p = self.params
# Get time masking parameters.
time_mask_max_frames = p.time_mask_max_frames[domain_id_index]
time_masks_per_frame = p.time_masks_per_frame[domain_id_index]
use_dynamic_time_mask_max_frames = \
p.use_dynamic_time_mask_max_frames[domain_id_index]
multiplicity = p.time_mask_count[domain_id_index]
max_ratio = p.time_mask_max_ratio[domain_id_index]
# If maximum mask length is zero, do nothing.
if ((time_mask_max_frames == 0 and not use_dynamic_time_mask_max_frames) or
max_ratio <= 0.0):
return inputs
if multiplicity == 0:
return inputs
seq_lengths = tf.cast(seq_lengths, tf.int32)
batch_size, time_length, _, _ = py_utils.GetShape(inputs)
# When using dynamic time mask size, discard upper-bound on
# maximum allowed frames for time mask.
if use_dynamic_time_mask_max_frames:
time_mask_max_frames = None
# Create masks in time direction and apply.
block_arrays = self._GetMask(
batch_size,
choose_range=seq_lengths,
mask_size=time_length,
global_seed=global_seed,
max_length=time_mask_max_frames,
masks_per_frame=time_masks_per_frame,
multiplicity=multiplicity,
dtype=dtype,
max_ratio=max_ratio)
# Non-empty random seed values are only used for testing or when using
# stateless random ops. seed_6 and seed_7 are set separately to avoid
# correlation of warp magnitude and origin position.
if p.use_input_dependent_random_seed:
seed_6 = global_seed + 6
seed_7 = global_seed + 7
else:
seed_6 = p.random_seed
seed_7 = p.random_seed
outputs = self.EinsumBxycBxBxyc(
inputs, block_arrays, name='einsum_formasking')
if noisify:
# Sample noise with standard deviation equal to factor * 0.1 + 0.0001.
# TODO(ngyuzh): Make sure this won't affect EOS.
if gaussian_noise:
stddev = 1.0
else:
random_uniform = _random_uniform_op(p.use_input_dependent_random_seed)
factor = random_uniform(
shape=(), minval=1.0, maxval=2.0, dtype=dtype, seed=seed_6)
stddev = factor * 0.1 + 0.0001
random_normal = _random_normal_op(p.use_input_dependent_random_seed)
noise = random_normal(
shape=[tf.shape(inputs)[0],
tf.shape(inputs)[1],
tf.shape(inputs)[2]],
stddev=stddev,
seed=seed_7)
if p.fprop_dtype is not None and p.fprop_dtype != p.dtype:
noise = tf.cast(noise, p.fprop_dtype)
outputs_mask = self.EinsumBxyBxBxy(
noise, 1.0 - block_arrays, name='einsum_fornoisymasking')
outputs = outputs + tf.expand_dims(outputs_mask, -1)
return outputs
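# Note on the noise branch above (illustrative, not part of the original
# module): with the non-Gaussian path, factor ~ Uniform(1, 2), so the noise
# stddev lies roughly in [0.1001, 0.2001); the noise is added only inside the
# masked-out regions via the (1.0 - block_arrays) weighting.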
def _BlockMask(self,
inputs,
global_seed,
dtype=tf.float32,
domain_id_index=0):
"""Applies block masking with given degree to inputs.
Args:
inputs: Batch of input features of shape (batch_size, time_length,
num_freq, channels).
global_seed: an integer seed tensor for stateless random ops.
dtype: Data type.
domain_id_index: domain id index.
Returns:
Inputs with random block masking applied.
"""
p = self.params
# Mask parameters.
block_mask_prob = p.block_mask_prob[domain_id_index]
block_mask_size = p.block_mask_size[domain_id_index]
# If masking length or count is zero, do nothing.
if block_mask_prob == 0.0:
return inputs
# Prepare random function.
random_uniform = _random_uniform_op(p.use_input_dependent_random_seed)
global_seed = global_seed or p.random_seed
if global_seed:
seed_1 = global_seed
seed_2 = global_seed + 1
else:
seed_1 = seed_2 = None
# Usually channel=1.
batch_size, t_len_orig, f_len_orig, channel = py_utils.GetShape(inputs)
logmel = tf.reshape(inputs, [batch_size, t_len_orig, -1])
f_len_orig *= channel
# Determine batch_mask_prob ~ uniform(0, block_mask_prob).
batch_mask_prob = random_uniform(
shape=[batch_size],
minval=0.0,
maxval=block_mask_prob,
dtype=dtype,
seed=seed_1)
# Pad logmel for block-wise operations. XLA requires compile-time fixed pad.
t_block, f_block = block_mask_size['t'], block_mask_size['f']
t_pad = (t_block - t_len_orig % t_block) % t_block
f_pad = (f_block - f_len_orig % f_block) % f_block
logmel = tf.pad(logmel, [[0, 0], [0, t_pad], [0, f_pad]])
# Prepare logmel for masking.
_, t_len, f_len = py_utils.GetShape(logmel)
num_t_blk, num_f_blk = t_len // t_block, f_len // f_block
block_major_logmel = tf.reshape(
logmel, [batch_size, num_t_blk, t_block, num_f_blk, f_block])
# Generate a mask.
mask = random_uniform(
shape=[batch_size, num_t_blk, 1, num_f_blk, 1],
minval=0.0,
maxval=1.0,
dtype=dtype,
seed=seed_2)
mask = tf.greater(
mask, batch_mask_prob[:, tf.newaxis, tf.newaxis, tf.newaxis,
tf.newaxis])
mask = tf.stop_gradient(tf.cast(mask, dtype=logmel.dtype))
# Apply the mask to logmel.
masked_logmel = mask * block_major_logmel
masked_logmel = tf.reshape(masked_logmel, [batch_size, t_len, f_len])
masked_logmel = masked_logmel[:, :t_len_orig, :f_len_orig]
outputs = tf.reshape(masked_logmel, py_utils.GetShape(inputs))
return outputs
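# Illustrative sketch (assumed sizes, not part of the original module): with
# block_mask_size = dict(t=32, f=32), an input of t_len_orig = 100 frames and
# f_len_orig = 80 bins is padded by t_pad = (32 - 100 % 32) % 32 = 28 and
# f_pad = (32 - 80 % 32) % 32 = 16, giving a 128 x 96 grid of 4 x 3 blocks,
# each of which is zeroed independently with probability batch_mask_prob.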
def _FrequencyWarp(self,
inputs,
global_seed,
dtype=tf.float32,
domain_id_index=0):
"""Applies frequency warping with given degree to inputs.
Args:
inputs: Batch of input features of shape (batch_size, time_length,
num_freq, channels).
global_seed: an integer seed tensor for stateless random ops.
dtype: Data type.
domain_id_index: Domain ID index.
Returns:
Inputs with random frequency warping applied.
"""
p = self.params
batch_size, _, num_freq, _ = py_utils.GetShape(inputs)
# Get parameters for warping.
freq_warp_max_bins = p.freq_warp_max_bins[domain_id_index]
# If maximum warp length is zero, do nothing.
if freq_warp_max_bins == 0:
return inputs
choose_range = tf.ones((batch_size,), dtype=tf.int32) * num_freq
# Create warping matrix in time direction and apply
warp_matrix = self._GetWarpMatrix(
batch_size,
choose_range=choose_range,
matrix_size=num_freq,
global_seed=global_seed,
max_warp_frames=freq_warp_max_bins,
dtype=dtype)
return self.EinsumBxycBzyBxzc(
inputs, warp_matrix, name='einsum_forfreqwarping')
def _TimeWarp(self,
inputs,
seq_lengths,
global_seed,
dtype=tf.float32,
domain_id_index=0):
"""Applies time warping with given degree to inputs.
Args:
inputs: Batch of input features of shape (batch_size, time_length,
num_freq, channels).
seq_lengths: The actual sequence lengths, of shape (batch_size,), over
which the warp is sampled.
global_seed: an integer seed tensor for stateless random ops.
dtype: Data type.
domain_id_index: Domain ID index.
Returns:
Inputs with random time warping applied.
"""
p = self.params
batch_size, time_length, _, _ = py_utils.GetShape(inputs)
# Get parameters for warping.
time_warp_max_frames = p.time_warp_max_frames[domain_id_index]
max_ratio = p.time_warp_max_ratio[domain_id_index]
time_warp_bound = p.time_warp_bound[domain_id_index]
assert time_warp_bound in ('static', 'dynamic')
# If maximum warp length is zero, do nothing.
if ((time_warp_max_frames == 0 and time_warp_bound == 'static') or
max_ratio <= 0.0):
return inputs
seq_lengths = tf.cast(seq_lengths, tf.int32)
# Discard upper-bound on time-warp frames when
# dynamic time warping is used.
if time_warp_bound == 'dynamic':
time_warp_max_frames = None
# Create warping matrix in time direction and apply
warp_matrix = self._GetWarpMatrix(
batch_size,
choose_range=seq_lengths,
matrix_size=time_length,
global_seed=global_seed,
max_warp_frames=time_warp_max_frames,
dtype=dtype,
max_ratio=max_ratio)
return self.EinsumBxycBzxBzyc(inputs, warp_matrix, name='einsum_forwarping')
def _FrequencyNoise(self,
inputs,
global_seed,
dtype=tf.float32,
domain_id_index=0):
"""Applies frequency noise with given degree to inputs.
It samples N(1, stddev) in frequency space, e.g. stddev = 0.1, and
multiplies the logmel features by the resulting noise scale vector.
Rationale of this augmentation:
1) Multiplication in freq space is convolution in time space.
2) White noise has a periodic pattern in time space, which can be
represented by a convolution operation [1][2].
3) So multiplication in freq space imitates white noise.
[1] https://ccrma.stanford.edu/~jos/sasp/Filtered_White_Noise.html
[2] https://en.wikipedia.org/wiki/White_noise#Discrete-time_white_noise
Args:
inputs: Batch of input features of shape (batch_size, time_length,
num_freq, channels).
global_seed: an integer seed tensor for stateless random ops.
dtype: Data type.
domain_id_index: Domain ID index.
Returns:
Inputs with random frequency noise applied.
"""
p = self.params
batch_size, _, num_freq, _ = py_utils.GetShape(inputs)
# Get parameters for noise.
freq_noise_max_stddev = p.freq_noise_max_stddev[domain_id_index]
# If maximum noise stddev is zero, do nothing.
if freq_noise_max_stddev <= 0.0:
return inputs
freq_noise_max_stddev = freq_noise_max_stddev * self.augment_weight
# Non-empty random seed values are only used for testing or when using
# stateless random ops. seed_1 and seed_2 are set separately to avoid
# correlation of random_uniform and random_normal.
if p.use_input_dependent_random_seed:
seed_1 = global_seed + 1
seed_2 = global_seed + 2
elif p.random_seed:
seed_1 = p.random_seed + 1
seed_2 = 2 * p.random_seed
else:
seed_1 = p.random_seed
seed_2 = p.random_seed
random_uniform = _random_uniform_op(p.use_input_dependent_random_seed)
random_normal = _random_normal_op(p.use_input_dependent_random_seed)
stddev = random_uniform(
shape=[batch_size, 1, 1, 1],
minval=0,
maxval=freq_noise_max_stddev,
seed=seed_1)
noise_scale = random_normal(
shape=[batch_size, 1, num_freq, 1], mean=1., stddev=stddev, seed=seed_2)
return inputs * noise_scale
def UnstackFeatures(self, src_inputs, src_paddings):
"""Unstacks src_input and src_paddings based off stack height."""
sh = self.params.stack_height
bs, old_series_length, _, channels = py_utils.GetShape(src_inputs)
unstacked_series_length = old_series_length * sh
src_inputs = tf.reshape(src_inputs,
[bs, unstacked_series_length, -1, channels])
content = 1 - src_paddings
lengths = tf.cast(sh * tf.reduce_sum(content, axis=1), tf.int32)
mask = tf.sequence_mask(lengths, maxlen=unstacked_series_length)
src_paddings = 1 - tf.cast(mask, tf.int32)
return src_inputs, src_paddings
def _AugmentationNetwork(self,
inputs,
paddings,
global_seed,
domain_id_index=0):
"""Returns augmented features.
Args:
inputs: Batch of input features of shape (batch_size, time_length,
num_freq, channels).
paddings: Batch of padding vectors of shape (batch_size, time_length).
global_seed: an integer seed tensor for stateless random ops.
domain_id_index: domain id index.
Returns:
Batch of output features of shape (batch_size, time_length, num_freq,
channels) obtained by applying random augmentations to inputs.
"""
p = self.params
dtype = p.dtype
# Unstack the features.
if p.unstack:
original_shape = tf.shape(inputs)
inputs, paddings = self.UnstackFeatures(inputs, paddings)
lengths = tf.reduce_sum(1 - paddings, 1)
inputs = self._FrequencyWarp(
inputs,
global_seed=global_seed,
dtype=dtype,
domain_id_index=domain_id_index)
inputs = self._TimeWarp(
inputs,
lengths,
global_seed=global_seed,
dtype=dtype,
domain_id_index=domain_id_index)
inputs = self._FrequencyNoise(
inputs,
global_seed=global_seed,
dtype=dtype,
domain_id_index=domain_id_index)
inputs = self._TimeMask(
inputs,
lengths,
global_seed=global_seed,
noisify=p.use_noise,
gaussian_noise=p.gaussian_noise,
dtype=dtype,
domain_id_index=domain_id_index)
inputs = self._FrequencyMask(
inputs,
global_seed=global_seed,
dtype=dtype,
domain_id_index=domain_id_index)
inputs = self._BlockMask(
inputs,
global_seed=global_seed,
dtype=dtype,
domain_id_index=domain_id_index)
# Restack the features after applying specaugment.
if p.unstack:
inputs = tf.reshape(inputs, original_shape)
return inputs
def FProp(self, theta, inputs, paddings, domain_ids=None):
"""Applies data augmentation by randomly mask spectrum in inputs.
Args:
theta: A NestedMap object containing weights' values of this layer and its
children layers.
inputs: A tensor of shape [batch, time, freq, num_channels].
paddings: A 0/1 tensor of shape [batch, time].
domain_ids: input domain_ids of shape [batch, time].
Returns:
A pair of tensors:
- augmented_inputs: A tensor of shape [batch, time, freq, num_channels].
- paddings: A 0/1 tensor of shape [batch, time].
"""
p = self.params
global_seed = None # A tensor seed in case stateless random ops are needed.
if p.use_input_dependent_random_seed:
global_seed = _global_seed_from_inputs(inputs)
batch_size = py_utils.GetShape(inputs)[0]
if len(p.domain_ids) > 1:
augmented_inputs = tf.zeros_like(inputs)
original_inputs = inputs
for i, domain_id in enumerate(p.domain_ids):
augmented_domain = self._AugmentationNetwork(
inputs,
paddings,
global_seed=global_seed,
domain_id_index=i)
target_domain = tf.cast(
tf.expand_dims(tf.tile([domain_id], [batch_size]), -1),
dtype=p.dtype)
# [batch, time].
domain_mask = tf.cast(
tf.equal(domain_ids, target_domain), dtype=p.dtype)
augmented_domain = self.EinsumBxycBxBxyc(
augmented_domain, domain_mask, name='einsum_domainmasking')
original_inputs = self.EinsumBxycBxBxyc(
original_inputs, 1.0 - domain_mask, name='einsum_domainmasking2')
augmented_inputs = augmented_domain + augmented_inputs
augmented_inputs = original_inputs + augmented_inputs
else:
augmented_inputs = self._AugmentationNetwork(
inputs,
paddings,
global_seed=global_seed,
domain_id_index=0)
return augmented_inputs, paddings
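# Minimal usage sketch (assumptions: a standard lingvo layer setup; tensor
# shapes are illustrative, not taken from this module):
#   p = SpectrumAugmenter.Params().Set(name='specaug', freq_mask_max_bins=27,
#                                      time_mask_max_frames=40)
#   specaug = p.Instantiate()
#   # features: [batch, time, freq, 1], paddings: [batch, time]
#   augmented, paddings = specaug.FProp(specaug.theta, features, paddings)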
|
{
"content_hash": "5c46458115fbee73ba876159b796a910",
"timestamp": "",
"source": "github",
"line_count": 1059,
"max_line_length": 80,
"avg_line_length": 38.29745042492918,
"alnum_prop": 0.6301994723475602,
"repo_name": "tensorflow/lingvo",
"id": "35a33b131e9feb07f25f7990c09b7e68b02d895d",
"size": "41246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lingvo/core/spectrum_augmenter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5163"
},
{
"name": "C++",
"bytes": "556344"
},
{
"name": "Dockerfile",
"bytes": "8484"
},
{
"name": "Jupyter Notebook",
"bytes": "36721"
},
{
"name": "Python",
"bytes": "9574124"
},
{
"name": "Shell",
"bytes": "50408"
},
{
"name": "Starlark",
"bytes": "182688"
},
{
"name": "TeX",
"bytes": "37275"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name = "Boids",
version = "1.0",
description = "The Boids!",
author = "Irina Grigorescu",
author_email = "irina.grigorescu.15@ucl.ac.uk",
url = "https://github.com/irinagrigorescu/bad_boids",
packages = find_packages(exclude=['*test']),
scripts = ['scripts/boids'],
install_requires = ['argparse', 'numpy', 'matplotlib']
)
|
{
"content_hash": "6a8eb7a1587e6fcdf0beb893f183c75c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 58,
"avg_line_length": 31.615384615384617,
"alnum_prop": 0.6399026763990268,
"repo_name": "irinagrigorescu/bad_boids",
"id": "559061af8e9b8f779e26f1ed4b222e2349535918",
"size": "411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17699"
}
],
"symlink_target": ""
}
|
import SimpleHTTPServer
import SocketServer
import subprocess
import signal
import os
PORT = 8080
def killapp(name):
p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if name in line:
pid = int(line.split(None, 1)[0])
os.kill(pid, signal.SIGKILL)
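# Illustrative note (example output assumed): a matching line of `ps -A` looks
# like "  1234 tty1     00:05:23 retroarch", so split(None, 1)[0] yields the
# pid "1234", which is then sent SIGKILL.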
class action(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
if self.path == '/':
print 'Index'
self.path = '/'
elif self.path == '/kill_atari?':
# Kill the Atari emulator process
killapp('retroarch')
self.path = '/'
elif self.path == '/kill_snes?':
# Kill the Super Nintendo emulator process
killapp('snes9x')
self.path = '/'
elif self.path == '/kill_nes?':
# Kill the Nintendo emulator process
killapp('retroarch')
self.path = '/'
elif self.path == '/restart_retropie?':
# Kill the RetroPie process (returns to the console) and start it again
command = "killall emulationstation"
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
self.path = '/'
elif self.path == '/rpi_shutdown?':
# Shut down the RPI
command = "sudo init 0"
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
self.path = '/'
elif self.path == '/rpi_reboot?':
# Reboot the RPI
command = "/usr/bin/sudo /sbin/shutdown -r now"
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
self.path = '/'
return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
Handler = action
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "serving at port", PORT
httpd.serve_forever()
|
{
"content_hash": "9c1e85a13456725de482b2385b659e86",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 77,
"avg_line_length": 28,
"alnum_prop": 0.6785714285714286,
"repo_name": "matisanh/retropie-administrator",
"id": "181fa05fb88f92afe3858954335b33b3e1b27e16",
"size": "1652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "127063"
},
{
"name": "HTML",
"bytes": "9078"
},
{
"name": "JavaScript",
"bytes": "43232"
},
{
"name": "Python",
"bytes": "1652"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
}
|
from scoring_engine.engine.basic_check import BasicCheck, CHECKS_BIN_PATH
class SSHCheck(BasicCheck):
required_properties = ['commands']
CMD = CHECKS_BIN_PATH + '/ssh_check {0} {1} {2} {3} {4}'
def command_format(self, properties):
account = self.get_random_account()
return (
self.host,
self.port,
account.username,
account.password,
properties['commands']
)
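# Illustrative sketch (values assumed): for host 10.0.0.5, port 22, a random
# account admin/secret and properties['commands'] == 'id', CMD expands to
# something like:
#   <CHECKS_BIN_PATH>/ssh_check 10.0.0.5 22 admin secret id
# (the BasicCheck base class is assumed to apply command_format()'s tuple to
# CMD via str.format).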
|
{
"content_hash": "36bacd6889e11e209b4f4a054dda5591",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 73,
"avg_line_length": 28.6875,
"alnum_prop": 0.5816993464052288,
"repo_name": "pwnbus/scoring_engine",
"id": "ce97747b3c1dfd51159bb6a201154b652c2db6d0",
"size": "459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scoring_engine/checks/ssh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2429"
},
{
"name": "HTML",
"bytes": "59933"
},
{
"name": "Makefile",
"bytes": "947"
},
{
"name": "PHP",
"bytes": "768"
},
{
"name": "Python",
"bytes": "176653"
},
{
"name": "Shell",
"bytes": "4473"
}
],
"symlink_target": ""
}
|
import six
from oslo_log import log as logging
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class AuthorizationTestJSON(base.BaseV2ComputeTest):
credentials = ['primary', 'alt']
@classmethod
def skip_checks(cls):
super(AuthorizationTestJSON, cls).skip_checks()
if not CONF.service_available.glance:
raise cls.skipException('Glance is not available.')
@classmethod
def setup_credentials(cls):
# No network resources required for this test
cls.set_network_resources()
super(AuthorizationTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(AuthorizationTestJSON, cls).setup_clients()
cls.client = cls.os.servers_client
cls.images_client = cls.os.images_client
cls.glance_client = cls.os.image_client
cls.keypairs_client = cls.os.keypairs_client
cls.security_client = cls.os.security_groups_client
cls.alt_client = cls.alt_manager.servers_client
cls.alt_images_client = cls.alt_manager.images_client
cls.alt_keypairs_client = cls.alt_manager.keypairs_client
cls.alt_security_client = cls.alt_manager.security_groups_client
@classmethod
def resource_setup(cls):
super(AuthorizationTestJSON, cls).resource_setup()
server = cls.create_test_server(wait_until='ACTIVE')
cls.server = cls.client.show_server(server['id'])
name = data_utils.rand_name('image')
body = cls.glance_client.create_image(name=name,
container_format='bare',
disk_format='raw',
is_public=False)
image_id = body['id']
image_file = six.StringIO(('*' * 1024))
body = cls.glance_client.update_image(image_id, data=image_file)
cls.glance_client.wait_for_image_status(image_id, 'active')
cls.image = cls.images_client.show_image(image_id)
cls.keypairname = data_utils.rand_name('keypair')
cls.keypairs_client.create_keypair(cls.keypairname)
name = data_utils.rand_name('security')
description = data_utils.rand_name('description')
cls.security_group = cls.security_client.create_security_group(
name, description)
parent_group_id = cls.security_group['id']
ip_protocol = 'tcp'
from_port = 22
to_port = 22
cls.rule = cls.security_client.create_security_group_rule(
parent_group_id, ip_protocol, from_port, to_port)
@classmethod
def resource_cleanup(cls):
if hasattr(cls, 'image'):
cls.images_client.delete_image(cls.image['id'])
if hasattr(cls, 'keypairname'):
cls.keypairs_client.delete_keypair(cls.keypairname)
if hasattr(cls, 'security_group'):
cls.security_client.delete_security_group(cls.security_group['id'])
super(AuthorizationTestJSON, cls).resource_cleanup()
@test.idempotent_id('56816e4a-bd34-47b5-aee9-268c3efeb5d4')
def test_get_server_for_alt_account_fails(self):
# A GET request for a server on another user's account should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.show_server,
self.server['id'])
@test.idempotent_id('fb8a4870-6d9d-44ad-8375-95d52e98d9f6')
def test_delete_server_for_alt_account_fails(self):
# A DELETE request for another user's server should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.delete_server,
self.server['id'])
@test.idempotent_id('d792f91f-1d49-4eb5-b1ff-b229c4b9dc64')
def test_update_server_for_alt_account_fails(self):
# An update server request for another user's server should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.update_server,
self.server['id'], name='test')
@test.idempotent_id('488f24df-d7f7-4207-949a-f17fcb8e8769')
def test_list_server_addresses_for_alt_account_fails(self):
# A list addresses request for another user's server should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.list_addresses,
self.server['id'])
@test.idempotent_id('00b442d0-2e72-40e7-9b1f-31772e36da01')
def test_list_server_addresses_by_network_for_alt_account_fails(self):
# A list address/network request for another user's server should fail
server_id = self.server['id']
self.assertRaises(lib_exc.NotFound,
self.alt_client.list_addresses_by_network, server_id,
'public')
@test.idempotent_id('cc90b35a-19f0-45d2-b680-2aabf934aa22')
def test_list_servers_with_alternate_tenant(self):
# A list on servers from one tenant should not
# show on alternate tenant
# Listing servers from alternate tenant
alt_server_ids = []
body = self.alt_client.list_servers()
alt_server_ids = [s['id'] for s in body['servers']]
self.assertNotIn(self.server['id'], alt_server_ids)
@test.idempotent_id('376dbc16-0779-4384-a723-752774799641')
def test_change_password_for_alt_account_fails(self):
# A change password request for another user's server should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.change_password,
self.server['id'], 'newpass')
@test.idempotent_id('14cb5ff5-f646-45ca-8f51-09081d6c0c24')
def test_reboot_server_for_alt_account_fails(self):
# A reboot request for another user's server should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.reboot,
self.server['id'], 'HARD')
@test.idempotent_id('8a0bce51-cd00-480b-88ba-dbc7d8408a37')
def test_rebuild_server_for_alt_account_fails(self):
# A rebuild request for another user's server should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.rebuild,
self.server['id'], self.image_ref_alt)
@test.idempotent_id('e4da647e-f982-4e61-9dad-1d1abebfb933')
def test_resize_server_for_alt_account_fails(self):
# A resize request for another user's server should fail
self.assertRaises(lib_exc.NotFound, self.alt_client.resize,
self.server['id'], self.flavor_ref_alt)
@test.idempotent_id('a9fe8112-0ffa-4902-b061-f892bd5fe0d3')
def test_create_image_for_alt_account_fails(self):
# A create image request for another user's server should fail
self.assertRaises(lib_exc.NotFound,
self.alt_images_client.create_image,
self.server['id'], 'testImage')
@test.idempotent_id('95d445f6-babc-4f2e-aea3-aa24ec5e7f0d')
def test_create_server_with_unauthorized_image(self):
# Server creation with another user's image should fail
self.assertRaises(lib_exc.BadRequest, self.alt_client.create_server,
'test', self.image['id'], self.flavor_ref)
@test.idempotent_id('acf8724b-142b-4044-82c3-78d31a533f24')
def test_create_server_fails_when_tenant_incorrect(self):
# A create server request should fail if the tenant id does not match
# the current user
# Change the base URL to impersonate another user
self.alt_client.auth_provider.set_alt_auth_data(
request_part='url',
auth_data=self.client.auth_provider.auth_data
)
self.assertRaises(lib_exc.BadRequest,
self.alt_client.create_server, 'test',
self.image['id'], self.flavor_ref)
@test.idempotent_id('f03d1ded-7fd4-4d29-bc13-e2391f29c625')
def test_create_keypair_in_analt_user_tenant(self):
# A create keypair request should fail if the tenant id does not match
# the current user
# POST keypair with other user tenant
k_name = data_utils.rand_name('keypair')
try:
# Change the base URL to impersonate another user
self.alt_keypairs_client.auth_provider.set_alt_auth_data(
request_part='url',
auth_data=self.keypairs_client.auth_provider.auth_data
)
resp = {}
resp['status'] = None
self.assertRaises(lib_exc.BadRequest,
self.alt_keypairs_client.create_keypair, k_name)
finally:
# Next request the base_url is back to normal
if (resp['status'] is not None):
self.alt_keypairs_client.delete_keypair(k_name)
LOG.error("Create keypair request should not happen "
"if the tenant id does not match the current user")
@test.idempotent_id('85bcdd8f-56b4-4868-ae56-63fbf6f7e405')
def test_get_keypair_of_alt_account_fails(self):
# A GET request for another user's keypair should fail
self.assertRaises(lib_exc.NotFound,
self.alt_keypairs_client.show_keypair,
self.keypairname)
@test.idempotent_id('6d841683-a8e0-43da-a1b8-b339f7692b61')
def test_delete_keypair_of_alt_account_fails(self):
# A DELETE request for another user's keypair should fail
self.assertRaises(lib_exc.NotFound,
self.alt_keypairs_client.delete_keypair,
self.keypairname)
@test.idempotent_id('fcb2e144-36e3-4dfb-9f9f-e72fcdec5656')
def test_get_image_for_alt_account_fails(self):
# A GET request for an image on another user's account should fail
self.assertRaises(lib_exc.NotFound,
self.alt_images_client.show_image, self.image['id'])
@test.idempotent_id('9facb962-f043-4a9d-b9ee-166a32dea098')
def test_delete_image_for_alt_account_fails(self):
# A DELETE request for another user's image should fail
self.assertRaises(lib_exc.NotFound,
self.alt_images_client.delete_image,
self.image['id'])
@test.idempotent_id('752c917e-83be-499d-a422-3559127f7d3c')
def test_create_security_group_in_analt_user_tenant(self):
# A create security group request should fail if the tenant id does not
# match the current user
# POST security group with other user tenant
s_name = data_utils.rand_name('security')
s_description = data_utils.rand_name('security')
try:
# Change the base URL to impersonate another user
self.alt_security_client.auth_provider.set_alt_auth_data(
request_part='url',
auth_data=self.security_client.auth_provider.auth_data
)
resp = {}
resp['status'] = None
self.assertRaises(lib_exc.BadRequest,
self.alt_security_client.create_security_group,
s_name, s_description)
finally:
# Next request the base_url is back to normal
if resp['status'] is not None:
self.alt_security_client.delete_security_group(resp['id'])
LOG.error("Create Security Group request should not happen if"
"the tenant id does not match the current user")
@test.idempotent_id('9db3590f-4d15-4e5f-985e-b28514919a6f')
def test_get_security_group_of_alt_account_fails(self):
# A GET request for another user's security group should fail
self.assertRaises(lib_exc.NotFound,
self.alt_security_client.show_security_group,
self.security_group['id'])
@test.idempotent_id('155387a5-2bbc-4acf-ab06-698dae537ea5')
def test_delete_security_group_of_alt_account_fails(self):
# A DELETE request for another user's security group should fail
self.assertRaises(lib_exc.NotFound,
self.alt_security_client.delete_security_group,
self.security_group['id'])
@test.idempotent_id('b2b76de0-210a-4089-b921-591c9ec552f6')
def test_create_security_group_rule_in_analt_user_tenant(self):
# A create security group rule request should fail if the tenant id
# does not match the current user
# POST security group rule with other user tenant
parent_group_id = self.security_group['id']
ip_protocol = 'icmp'
from_port = -1
to_port = -1
try:
# Change the base URL to impersonate another user
self.alt_security_client.auth_provider.set_alt_auth_data(
request_part='url',
auth_data=self.security_client.auth_provider.auth_data
)
resp = {}
resp['status'] = None
self.assertRaises(lib_exc.BadRequest,
self.alt_security_client.
create_security_group_rule,
parent_group_id, ip_protocol, from_port,
to_port)
finally:
# Next request the base_url is back to normal
if resp['status'] is not None:
self.alt_security_client.delete_security_group_rule(resp['id'])
LOG.error("Create security group rule request should not "
"happen if the tenant id does not match the"
" current user")
@test.idempotent_id('c6044177-37ef-4ce4-b12c-270ddf26d7da')
def test_delete_security_group_rule_of_alt_account_fails(self):
# A DELETE request for another user's security group rule
# should fail
self.assertRaises(lib_exc.NotFound,
self.alt_security_client.delete_security_group_rule,
self.rule['id'])
@test.idempotent_id('c5f52351-53d9-4fc9-83e5-917f7f5e3d71')
def test_set_metadata_of_alt_account_server_fails(self):
# A set metadata for another user's server should fail
req_metadata = {'meta1': 'data1', 'meta2': 'data2'}
self.assertRaises(lib_exc.NotFound,
self.alt_client.set_server_metadata,
self.server['id'],
req_metadata)
@test.idempotent_id('fb6f51e9-df15-4939-898d-1aca38c258f0')
def test_set_metadata_of_alt_account_image_fails(self):
# A set metadata for another user's image should fail
req_metadata = {'meta1': 'value1', 'meta2': 'value2'}
self.assertRaises(lib_exc.NotFound,
self.alt_images_client.set_image_metadata,
self.image['id'], req_metadata)
@test.idempotent_id('dea1936a-473d-49f2-92ad-97bb7aded22e')
def test_get_metadata_of_alt_account_server_fails(self):
# A get metadata for another user's server should fail
req_metadata = {'meta1': 'data1'}
self.client.set_server_metadata(self.server['id'], req_metadata)
self.addCleanup(self.client.delete_server_metadata_item,
self.server['id'], 'meta1')
self.assertRaises(lib_exc.NotFound,
self.alt_client.get_server_metadata_item,
self.server['id'], 'meta1')
@test.idempotent_id('16b2d724-0d3b-4216-a9fa-97bd4d9cf670')
def test_get_metadata_of_alt_account_image_fails(self):
# A get metadata for another user's image should fail
req_metadata = {'meta1': 'value1'}
self.addCleanup(self.images_client.delete_image_metadata_item,
self.image['id'], 'meta1')
self.images_client.set_image_metadata(self.image['id'],
req_metadata)
self.assertRaises(lib_exc.NotFound,
self.alt_images_client.show_image_metadata_item,
self.image['id'], 'meta1')
@test.idempotent_id('79531e2e-e721-493c-8b30-a35db36fdaa6')
def test_delete_metadata_of_alt_account_server_fails(self):
# A delete metadata for another user's server should fail
req_metadata = {'meta1': 'data1'}
self.addCleanup(self.client.delete_server_metadata_item,
self.server['id'], 'meta1')
self.client.set_server_metadata(self.server['id'], req_metadata)
self.assertRaises(lib_exc.NotFound,
self.alt_client.delete_server_metadata_item,
self.server['id'], 'meta1')
@test.idempotent_id('a5175dcf-cef8-43d6-9b77-3cb707d62e94')
def test_delete_metadata_of_alt_account_image_fails(self):
# A delete metadata for another user's image should fail
req_metadata = {'meta1': 'data1'}
self.addCleanup(self.images_client.delete_image_metadata_item,
self.image['id'], 'meta1')
self.images_client.set_image_metadata(self.image['id'],
req_metadata)
self.assertRaises(lib_exc.NotFound,
self.alt_images_client.delete_image_metadata_item,
self.image['id'], 'meta1')
@test.idempotent_id('b0c1e7a0-8853-40fd-8384-01f93d116cae')
def test_get_console_output_of_alt_account_server_fails(self):
# A Get Console Output for another user's server should fail
self.assertRaises(lib_exc.NotFound,
self.alt_client.get_console_output,
self.server['id'], 10)
|
{
"content_hash": "b6ed5177aaf44cb73cdc6d5dc35f9b89",
"timestamp": "",
"source": "github",
"line_count": 375,
"max_line_length": 79,
"avg_line_length": 47.650666666666666,
"alnum_prop": 0.6126811796966815,
"repo_name": "redhat-cip/tempest",
"id": "58c2206965b305fcf670dfdb4fc7c26407f94761",
"size": "18505",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/api/compute/test_authorization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2691544"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from cStringIO import StringIO
import glob
import json
import logging
import optparse
import os
import re
import subprocess
import sys
from kudu_util import get_upstream_commit, check_output, ROOT, Colors, init_logging
import iwyu.fix_includes
from iwyu.fix_includes import ParseAndMergeIWYUOutput
_USAGE = """\
%prog [--fix] [--sort-only] [--all | --from-git | <path>...]
%prog is a wrapper around include-what-you-use that passes the appropriate
configuration and filters the output to ignore known issues. In addition,
it can automatically pipe the output back into the IWYU-provided 'fix_includes.py'
script in order to fix any reported issues.
"""
_MAPPINGS_DIR = os.path.join(ROOT, "build-support/iwyu/mappings/")
_TOOLCHAIN_DIR = os.path.join(ROOT, "thirdparty/clang-toolchain/bin")
_IWYU_TOOL = os.path.join(ROOT, "build-support/iwyu/iwyu_tool.py")
# Matches source files that we should run on.
_RE_SOURCE_FILE = re.compile(r'\.(c|cc|h)$')
# Matches compilation errors in the output of IWYU
_RE_CLANG_ERROR = re.compile(r'^.+?:\d+:\d+:\s*'
r'(fatal )?error:', re.MULTILINE)
# Files that we don't want to ever run IWYU on, because they aren't clean yet.
_MUTED_FILES = set([
"src/kudu/cfile/cfile_reader.h",
"src/kudu/cfile/cfile_writer.h",
"src/kudu/client/client-internal.h",
"src/kudu/client/client-test.cc",
"src/kudu/common/encoded_key-test.cc",
"src/kudu/common/schema.h",
"src/kudu/experiments/rwlock-perf.cc",
"src/kudu/hms/hms_catalog.cc",
"src/kudu/hms/hms_catalog.h",
"src/kudu/hms/hms_client-test.cc",
"src/kudu/hms/hms_client.cc",
"src/kudu/hms/hms_client.h",
"src/kudu/hms/mini_hms.h",
"src/kudu/master/catalog_manager.cc",
"src/kudu/master/catalog_manager.h",
"src/kudu/rpc/reactor.cc",
"src/kudu/rpc/reactor.h",
"src/kudu/security/ca/cert_management.cc",
"src/kudu/security/ca/cert_management.h",
"src/kudu/security/cert-test.cc",
"src/kudu/security/cert.cc",
"src/kudu/security/cert.h",
"src/kudu/security/openssl_util.cc",
"src/kudu/security/openssl_util.h",
"src/kudu/security/tls_context.cc",
"src/kudu/security/tls_handshake.cc",
"src/kudu/security/tls_socket.h",
"src/kudu/security/x509_check_host.cc",
"src/kudu/server/default-path-handlers.cc",
"src/kudu/server/webserver.cc",
"src/kudu/util/bit-util-test.cc",
"src/kudu/util/group_varint-test.cc",
"src/kudu/util/minidump.cc",
"src/kudu/util/mt-metrics-test.cc",
"src/kudu/util/process_memory.cc",
"src/kudu/util/rle-test.cc"
])
# Flags to pass to iwyu/fix_includes.py for Kudu-specific style.
_FIX_INCLUDES_STYLE_FLAGS = [
'--blank_lines',
'--blank_line_between_c_and_cxx_includes',
'--separate_project_includes=kudu/'
]
# Directory containing the compilation database
_BUILD_DIR = os.path.join(ROOT, 'build/latest')
def _get_file_list_from_git():
upstream_commit = get_upstream_commit()
out = check_output(["git", "diff", "--name-only", upstream_commit]).splitlines()
return [l for l in out if _RE_SOURCE_FILE.search(l)]
def _get_paths_from_compilation_db():
db_path = os.path.join(_BUILD_DIR, 'compile_commands.json')
with open(db_path, 'r') as fileobj:
compilation_db = json.load(fileobj)
return [entry['file'] for entry in compilation_db]
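# Illustrative note (entry contents assumed): each compile_commands.json entry
# is an object such as
#   {"directory": "...", "command": "...", "file": "/path/to/foo.cc"}
# so this returns the list of source file paths known to the current build.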
def _run_iwyu_tool(paths):
iwyu_args = ['--max_line_length=256']
for m in glob.glob(os.path.join(_MAPPINGS_DIR, "*.imp")):
iwyu_args.append("--mapping_file=%s" % os.path.abspath(m))
cmdline = [_IWYU_TOOL, '-p', _BUILD_DIR]
cmdline.extend(paths)
cmdline.append('--')
cmdline.extend(iwyu_args)
# iwyu_tool.py requires include-what-you-use on the path
env = os.environ.copy()
env['PATH'] = "%s:%s" % (_TOOLCHAIN_DIR, env['PATH'])
def crash(output):
sys.exit((Colors.RED + "Failed to run IWYU tool.\n\n" + Colors.RESET +
Colors.YELLOW + "Command line:\n" + Colors.RESET +
"%s\n\n" +
Colors.YELLOW + "Output:\n" + Colors.RESET +
"%s") % (" ".join(cmdline), output))
try:
output = check_output(cmdline, env=env, stderr=subprocess.STDOUT)
if '\nFATAL ERROR: ' in output or \
'Assertion failed: ' in output or \
_RE_CLANG_ERROR.search(output):
crash(output)
return output
except subprocess.CalledProcessError, e:
crash(e.output)
def _is_muted(path):
assert os.path.isabs(path)
rel = os.path.relpath(path, ROOT)
return not rel.startswith('src/') or rel in _MUTED_FILES
def _filter_paths(paths):
return [p for p in paths if not _is_muted(p)]
def _relativize_paths(paths):
""" Make paths relative to the build directory. """
return [os.path.relpath(p, _BUILD_DIR) for p in paths]
def _get_thirdparty_include_dirs():
return glob.glob(os.path.join(ROOT, "thirdparty", "installed", "*", "include"))
def _get_fixer_flags(flags):
args = ['--quiet',
'--nosafe_headers',
'--source_root=%s' % os.path.join(ROOT, 'src')]
if flags.dry_run:
args.append("--dry_run")
for d in _get_thirdparty_include_dirs():
args.extend(['--thirdparty_include_dir', d])
args.extend(_FIX_INCLUDES_STYLE_FLAGS)
fixer_flags, _ = iwyu.fix_includes.ParseArgs(args)
return fixer_flags
def _do_iwyu(flags, paths):
iwyu_output = _run_iwyu_tool(paths)
if flags.dump_iwyu_output:
logging.info("Dumping iwyu output to %s", flags.dump_iwyu_output)
with file(flags.dump_iwyu_output, "w") as f:
print(iwyu_output, file=f)
stream = StringIO(iwyu_output)
fixer_flags = _get_fixer_flags(flags)
# Passing None as 'fix_paths' tells the fixer script to process
# all of the IWYU output, instead of just the output corresponding
# to files in 'paths'. This means that if you run this script on a
# .cc file, it will also report and fix errors in headers included
# by that .cc file.
fix_paths = None
records = ParseAndMergeIWYUOutput(stream, fix_paths, fixer_flags)
unfiltered_count = len(records)
records = [r for r in records if not _is_muted(os.path.abspath(r.filename))]
if len(records) < unfiltered_count:
logging.info("Muted IWYU suggestions on %d file(s)", unfiltered_count - len(records))
return iwyu.fix_includes.FixManyFiles(records, fixer_flags)
def _do_sort_only(flags, paths):
fixer_flags = _get_fixer_flags(flags)
iwyu.fix_includes.SortIncludesInFiles(paths, fixer_flags)
def main(argv):
parser = optparse.OptionParser(usage=_USAGE)
for i, arg in enumerate(argv):
if arg.startswith('-'):
argv[i] = argv[i].replace('_', '-')
parser.add_option('--all', action='store_true',
help=('Process all files listed in the compilation database of the current '
'build.'))
parser.add_option('--from-git', action='store_true',
help=('Determine the list of files to run IWYU automatically based on git. '
'All files which are modified in the current working tree or in commits '
'not yet committed upstream by gerrit are processed.'))
parser.add_option('--fix', action='store_false', dest="dry_run", default=True,
help=('If this is set, fixes IWYU issues in place.'))
parser.add_option('-s', '--sort-only', action='store_true',
help=('Just sort #includes of files listed on cmdline;'
' do not add or remove any #includes'))
parser.add_option('--dump-iwyu-output', type='str',
help=('A path to dump the raw IWYU output to. This can be useful for '
'debugging this tool.'))
(flags, paths) = parser.parse_args(argv[1:])
if bool(flags.from_git) + bool(flags.all) + (len(paths) > 0) != 1:
sys.exit('Must specify exactly one of --all, --from-git, or a list of paths')
    do_filtering = True
    if flags.from_git:
        paths = _get_file_list_from_git()
        paths = [os.path.abspath(os.path.join(ROOT, p)) for p in paths]
    elif paths:
        paths = [os.path.abspath(p) for p in paths]
        # If paths are specified explicitly, don't filter them out.
        do_filtering = False
    elif flags.all:
        paths = _filter_paths(_get_paths_from_compilation_db())
    else:
        assert False, "Should not reach here"

    if do_filtering:
        orig_count = len(paths)
        paths = _filter_paths(paths)
        if len(paths) != orig_count:
            logging.info("Filtered %d paths muted by configuration in iwyu.py",
                         orig_count - len(paths))
    else:
        muted_paths = [p for p in paths if _is_muted(p)]
        if muted_paths:
            logging.warning("%d selected path(s) are known to have IWYU issues:", len(muted_paths))
            for p in muted_paths:
                logging.warning(" %s", p)

    # If we came up with an empty list (no relevant files changed in the commit)
    # then we should early-exit. Otherwise, we'd end up passing an empty list to
    # IWYU and it would run on every file.
    if flags.from_git and not paths:
        logging.info("No files selected for analysis.")
        sys.exit(0)
    # IWYU output will be relative to the compilation database, which is in
    # the build directory. In order for the fixer script to properly find the files,
    # we need to treat all paths relative to that directory and chdir into it first.
    paths = _relativize_paths(paths)
    os.chdir(_BUILD_DIR)

    # For correct results, IWYU depends on the generated header files.
    logging.info("Ensuring IWYU dependencies are built...")
    if os.path.exists('Makefile'):
        subprocess.check_call(['make', 'iwyu-generated-headers'])
    elif os.path.exists('build.ninja'):
        subprocess.check_call(['ninja', 'iwyu-generated-headers'])
    else:
        logging.error('No Makefile or build.ninja found in build directory %s',
                      _BUILD_DIR)
        sys.exit(1)

    logging.info("Checking %d file(s)...", len(paths))
    if flags.sort_only:
        return _do_sort_only(flags, paths)
    else:
        return _do_iwyu(flags, paths)


if __name__ == "__main__":
    init_logging()
    sys.exit(main(sys.argv))
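
# Example invocations (illustrative only; these use the flags defined above and
# assume the script is run from a Kudu checkout; the .cc path below is invented):
#
#   build-support/iwyu.py --from-git            # report IWYU issues in files touched by git
#   build-support/iwyu.py --from-git --fix      # apply the suggested include fixes in place
#   build-support/iwyu.py --all                 # analyze every file in the compilation database
#   build-support/iwyu.py -s path/to/foo.cc     # only sort the #includes of the given file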
|
{
"content_hash": "9e6a957188118f355638b98bf0d8428a",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 99,
"avg_line_length": 36.21739130434783,
"alnum_prop": 0.662264905962385,
"repo_name": "EvilMcJerkface/kudu",
"id": "f267d0c3c59d238ff9ac06205850102a1fed7a48",
"size": "10805",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build-support/iwyu.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "423003"
},
{
"name": "C++",
"bytes": "14088007"
},
{
"name": "CMake",
"bytes": "203355"
},
{
"name": "CSS",
"bytes": "1364"
},
{
"name": "Clojure",
"bytes": "54969"
},
{
"name": "HTML",
"bytes": "24429"
},
{
"name": "Java",
"bytes": "1919604"
},
{
"name": "JavaScript",
"bytes": "5920"
},
{
"name": "Makefile",
"bytes": "658"
},
{
"name": "Perl",
"bytes": "32137"
},
{
"name": "Python",
"bytes": "485662"
},
{
"name": "R",
"bytes": "11537"
},
{
"name": "Scala",
"bytes": "166106"
},
{
"name": "Shell",
"bytes": "106702"
},
{
"name": "Thrift",
"bytes": "59110"
}
],
"symlink_target": ""
}
|
"""
Represents an EC2 Keypair
"""


class KeyPair:
    def __init__(self, connection=None):
        self.connection = connection
        self.name = None
        self.fingerprint = None
        self.material = None

    def __repr__(self):
        return 'KeyPair:%s' % self.name

    # startElement/endElement are SAX-style handlers that boto calls while
    # parsing the XML returned by the EC2 API.
    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'keyName':
            self.name = value
        elif name == 'keyFingerprint':
            self.fingerprint = value
        elif name == 'keyMaterial':
            self.material = value
        else:
            setattr(self, name, value)

    def delete(self):
        return self.connection.delete_key_pair(self.name)
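
# A minimal usage sketch (illustrative, not part of this module; assumes a boto
# EC2Connection named `conn` and a hypothetical key name):
#
#     kp = conn.create_key_pair('example-key')   # a KeyPair with `material` populated
#     print kp.name, kp.fingerprint
#     kp.delete()                                # wraps conn.delete_key_pair(kp.name)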
|
{
"content_hash": "d35ead068827b3f8badd1d13fd7d9e95",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 57,
"avg_line_length": 22.818181818181817,
"alnum_prop": 0.5697211155378487,
"repo_name": "carlgao/lenga",
"id": "172a80c955d166983b599158d3800808a589691c",
"size": "1858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "images/lenny64-peon/usr/share/python-support/python-boto/boto/ec2/keypair.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "3281"
},
{
"name": "C#",
"bytes": "97763"
},
{
"name": "CSS",
"bytes": "39383"
},
{
"name": "Emacs Lisp",
"bytes": "6274490"
},
{
"name": "Frege",
"bytes": "463786"
},
{
"name": "IDL",
"bytes": "377510"
},
{
"name": "JavaScript",
"bytes": "1032063"
},
{
"name": "Mathematica",
"bytes": "11862"
},
{
"name": "Perl",
"bytes": "57841501"
},
{
"name": "Prolog",
"bytes": "9867"
},
{
"name": "Python",
"bytes": "10875379"
},
{
"name": "Ruby",
"bytes": "72162"
},
{
"name": "Shell",
"bytes": "22775"
},
{
"name": "Slash",
"bytes": "126702"
},
{
"name": "SystemVerilog",
"bytes": "105693"
},
{
"name": "TeX",
"bytes": "742855"
},
{
"name": "VimL",
"bytes": "1845"
},
{
"name": "XProc",
"bytes": "22962"
},
{
"name": "XSLT",
"bytes": "4075"
}
],
"symlink_target": ""
}
|
__version__=''' $Id: attrmap.py 3601 2009-11-26 15:11:20Z rgbecker $ '''
__doc__='''Framework for objects whose assignments are checked. Used by graphics.

We developed reportlab/graphics prior to Python 2 and metaclasses. For the
graphics, we wanted to be able to declare the attributes of a class, check
them on assignment, and convert from string arguments. Examples of
attrmap-based objects can be found in reportlab/graphics/shapes. It lets
us define structures like the one below, which are seen in more modern form
in Django models and other frameworks.

We'll probably replace this one day soon, hopefully with no impact on client
code.

class Rect(SolidShape):
    """Rectangle, possibly with rounded corners."""
    _attrMap = AttrMap(BASE=SolidShape,
        x = AttrMapValue(isNumber),
        y = AttrMapValue(isNumber),
        width = AttrMapValue(isNumber),
        height = AttrMapValue(isNumber),
        rx = AttrMapValue(isNumber),
        ry = AttrMapValue(isNumber),
        )
'''
from UserDict import UserDict
from reportlab.lib.validators import isAnything, _SequenceTypes, DerivedValue
from reportlab import rl_config


class CallableValue:
    '''a class to allow callable initial values'''
    def __init__(self,func,*args,**kw):
        #assert iscallable(func)
        self.func = func
        self.args = args
        self.kw = kw

    def __call__(self):
        return self.func(*self.args,**self.kw)


class AttrMapValue:
    '''Simple multi-value holder for attribute maps'''
    def __init__(self,validate=None,desc=None,initial=None, advancedUsage=0, **kw):
        self.validate = validate or isAnything
        self.desc = desc
        self._initial = initial
        self._advancedUsage = advancedUsage
        for k,v in kw.items():
            setattr(self,k,v)

    def __getattr__(self,name):
        #hack to allow callable initial values
        if name=='initial':
            if isinstance(self._initial,CallableValue): return self._initial()
            return self._initial
        elif name=='hidden':
            return 0
        raise AttributeError(name)

    def __repr__(self):
        return 'AttrMapValue(%s)' % ', '.join(['%s=%r' % i for i in self.__dict__.items()])
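
# A small illustrative sketch (not part of the original module): AttrMapValue's
# __getattr__ above means an `initial` given as a CallableValue is evaluated
# lazily each time it is read, e.g.:
#
#     import time
#     stamp = AttrMapValue(isAnything, initial=CallableValue(time.time))
#     stamp.initial    # calls time.time() now, not at declaration time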


class AttrMap(UserDict):
    def __init__(self,BASE=None,UNWANTED=[],**kw):
        data = {}
        if BASE:
            if isinstance(BASE,AttrMap):
                data = BASE.data    #they used BASECLASS._attrMap
            else:
                if type(BASE) not in (type(()),type([])): BASE = (BASE,)
                for B in BASE:
                    if hasattr(B,'_attrMap'):
                        data.update(getattr(B._attrMap,'data',{}))
                    else:
                        raise ValueError('BASE=%s has wrong kind of value' % str(B))
        UserDict.__init__(self,data)
        self.remove(UNWANTED)
        self.data.update(kw)

    def update(self,kw):
        if isinstance(kw,AttrMap): kw = kw.data
        self.data.update(kw)

    def remove(self,unwanted):
        for k in unwanted:
            try:
                del self[k]
            except KeyError:
                pass

    def clone(self,UNWANTED=[],**kw):
        c = AttrMap(BASE=self,UNWANTED=UNWANTED)
        c.update(kw)
        return c


def validateSetattr(obj,name,value):
    '''validate setattr(obj,name,value)'''
    if rl_config.shapeChecking:
        map = obj._attrMap
        if map and name[0] != '_':
            #we always allow the inherited values; they cannot
            #be checked until draw time.
            if isinstance(value, DerivedValue):
                #let it through
                pass
            else:
                try:
                    validate = map[name].validate
                    if not validate(value):
                        raise AttributeError("Illegal assignment of '%s' to '%s' in class %s" % (value, name, obj.__class__.__name__))
                except KeyError:
                    raise AttributeError("Illegal attribute '%s' in class %s" % (name, obj.__class__.__name__))
    obj.__dict__[name] = value
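
# A minimal sketch (illustrative, not part of the original module) of how a class
# typically wires validateSetattr into attribute assignment; `Widget` and its
# attribute are invented names:
#
#     class Widget:
#         _attrMap = AttrMap(x=AttrMapValue(isAnything, desc='sample attribute'))
#         def __setattr__(self, name, value):
#             validateSetattr(self, name, value)
#
#     w = Widget()
#     w.x = 3          # accepted and stored in w.__dict__
#     w.unknown = 1    # AttributeError when rl_config.shapeChecking is on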


def _privateAttrMap(obj,ret=0):
    '''clone obj._attrMap if required'''
    A = obj._attrMap
    oA = getattr(obj.__class__,'_attrMap',None)
    if ret:
        if oA is A:
            return A.clone(), oA
        else:
            return A, None
    else:
        if oA is A:
            obj._attrMap = A.clone()


def _findObjectAndAttr(src, P):
    '''Locate the object src.P for P a string, return parent and name of attribute
    '''
    P = P.split('.')
    if len(P) == 0:
        return None, None
    else:
        for p in P[0:-1]:
            src = getattr(src, p)
        return src, P[-1]


def hook__setattr__(obj):
    if not hasattr(obj,'__attrproxy__'):
        C = obj.__class__
        import new
        obj.__class__=new.classobj(C.__name__,(C,)+C.__bases__,
            {'__attrproxy__':[],
             '__setattr__':lambda self,k,v,osa=getattr(obj,'__setattr__',None),hook=hook: hook(self,k,v,osa)})


def addProxyAttribute(src,name,validate=None,desc=None,initial=None,dst=None):
    '''
    Add a proxy attribute 'name' to src with targets dst
    '''
    #sanity
    assert hasattr(src,'_attrMap'), 'src object has no _attrMap'
    A, oA = _privateAttrMap(src,1)
    if type(dst) not in _SequenceTypes: dst = dst,
    D = []
    DV = []
    for d in dst:
        if type(d) in _SequenceTypes:
            d, e = d[0], d[1:]
        obj, attr = _findObjectAndAttr(src,d)
        if obj:
            dA = getattr(obj,'_attrMap',None)
|
{
"content_hash": "dc7ae590bfd127a407ddace41f8bafce",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 134,
"avg_line_length": 33.92727272727273,
"alnum_prop": 0.5691318327974276,
"repo_name": "icomms/wqmanager",
"id": "d0b7d45dd0f9f1de7ae0538b4efd6103616e093d",
"size": "5782",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "reportlab/lib/attrmap.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "793418"
},
{
"name": "PHP",
"bytes": "2863"
},
{
"name": "Python",
"bytes": "3735941"
},
{
"name": "Shell",
"bytes": "383"
}
],
"symlink_target": ""
}
|