| repo_name (string, len 5–100) | path (string, len 4–375) | copies (string, 991 classes) | size (string, len 4–7) | content (string, len 666–1M) | license (string, 15 classes) |
|---|---|---|---|---|---|
liu602348184/django | tests/test_client/tests.py | 70 | 30346 | # -*- coding: utf-8 -*-
"""
Testing using the Test Client
The test client is a class that can act like a simple
browser for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
``Client`` objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the ``Client`` instance.
This is not intended as a replacement for Twill, Selenium, or
other browser automation frameworks - it is here to allow
testing against the contexts and templates produced by a view,
rather than the HTML rendered to the end-user.
"""
from __future__ import unicode_literals
import datetime
from django.contrib.auth.models import User
from django.core import mail
from django.http import HttpResponse
from django.test import (
Client, RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from .views import get_view, post_view, trace_view
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF='test_client.urls',)
class ClientTest(TestCase):
    """Exercise the Django test ``Client``: HTTP method helpers, template and
    context introspection, redirect following, form handling, authentication
    (``login``/``force_login``/``logout``), sessions and the mail outbox."""

    @classmethod
    def setUpTestData(cls):
        # Three fixture users sharing the SHA1-hashed password 'password':
        # u1 is active, u2 is inactive, u3 is a staff member.
        cls.u1 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
            first_name='Test', last_name='Client', email='testclient@example.com', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u2 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='inactive',
            first_name='Inactive', last_name='User', email='testclient@example.com', is_staff=False, is_active=False,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u3 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
            first_name='Staff', last_name='Member', email='testclient@example.com', is_staff=True, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )

    def test_get_view(self):
        "GET a view"
        # The data is ignored, but let's check it doesn't crash the system
        # anyway.
        data = {'var': '\xf2'}
        response = self.client.get('/get_view/', data)
        # Check some response details
        self.assertContains(response, 'This is a test')
        self.assertEqual(response.context['var'], '\xf2')
        self.assertEqual(response.templates[0].name, 'GET Template')

    def test_get_post_view(self):
        "GET a view that normally expects POSTs"
        response = self.client.get('/post_view/', {})
        # Check some response details
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.templates[0].name, 'Empty GET Template')
        self.assertTemplateUsed(response, 'Empty GET Template')
        self.assertTemplateNotUsed(response, 'Empty POST Template')

    def test_empty_post(self):
        "POST an empty dictionary to a view"
        response = self.client.post('/post_view/', {})
        # Check some response details
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.templates[0].name, 'Empty POST Template')
        self.assertTemplateNotUsed(response, 'Empty GET Template')
        self.assertTemplateUsed(response, 'Empty POST Template')

    def test_post(self):
        "POST some data to a view"
        post_data = {
            'value': 37
        }
        response = self.client.post('/post_view/', post_data)
        # Check some response details
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['data'], '37')
        self.assertEqual(response.templates[0].name, 'POST Template')
        self.assertContains(response, 'Data received')

    def test_trace(self):
        """TRACE a view"""
        response = self.client.trace('/trace_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['method'], 'TRACE')
        self.assertEqual(response.templates[0].name, 'TRACE Template')

    def test_response_headers(self):
        "Check the value of HTTP headers returned in a response"
        response = self.client.get("/header_view/")
        self.assertEqual(response['X-DJANGO-TEST'], 'Slartibartfast')

    def test_response_attached_request(self):
        """
        Check that the returned response has a ``request`` attribute with the
        originating environ dict and a ``wsgi_request`` with the originating
        ``WSGIRequest`` instance.
        """
        response = self.client.get("/header_view/")
        self.assertTrue(hasattr(response, 'request'))
        self.assertTrue(hasattr(response, 'wsgi_request'))
        for key, value in response.request.items():
            self.assertIn(key, response.wsgi_request.environ)
            self.assertEqual(response.wsgi_request.environ[key], value)

    def test_response_resolver_match(self):
        """
        The response contains a ResolverMatch instance.
        """
        response = self.client.get('/header_view/')
        self.assertTrue(hasattr(response, 'resolver_match'))

    def test_response_resolver_match_redirect_follow(self):
        """
        The response ResolverMatch instance contains the correct
        information when following redirects.
        """
        response = self.client.get('/redirect_view/', follow=True)
        self.assertEqual(response.resolver_match.url_name, 'get_view')

    def test_response_resolver_match_regular_view(self):
        """
        The response ResolverMatch instance contains the correct
        information when accessing a regular view.
        """
        response = self.client.get('/get_view/')
        self.assertEqual(response.resolver_match.url_name, 'get_view')

    def test_raw_post(self):
        "POST raw data (with a content type) to a view"
        test_doc = """<?xml version="1.0" encoding="utf-8"?><library><book><title>Blink</title><author>Malcolm Gladwell</author></book></library>"""
        response = self.client.post("/raw_post_view/", test_doc,
                                    content_type="text/xml")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.templates[0].name, "Book template")
        self.assertEqual(response.content, b"Blink - Malcolm Gladwell")

    def test_insecure(self):
        "GET a URL through http"
        response = self.client.get('/secure_view/', secure=False)
        self.assertFalse(response.test_was_secure_request)
        self.assertEqual(response.test_server_port, '80')

    def test_secure(self):
        "GET a URL through https"
        response = self.client.get('/secure_view/', secure=True)
        self.assertTrue(response.test_was_secure_request)
        self.assertEqual(response.test_server_port, '443')

    def test_redirect(self):
        "GET a URL that redirects elsewhere"
        response = self.client.get('/redirect_view/')
        # Check that the response was a 302 (redirect)
        self.assertRedirects(response, '/get_view/')

    def test_redirect_with_query(self):
        "GET a URL that redirects with given GET parameters"
        response = self.client.get('/redirect_view/', {'var': 'value'})
        # Check if parameters are intact
        self.assertRedirects(response, '/get_view/?var=value')

    def test_permanent_redirect(self):
        "GET a URL that redirects permanently elsewhere"
        response = self.client.get('/permanent_redirect_view/')
        # Check that the response was a 301 (permanent redirect)
        self.assertRedirects(response, '/get_view/', status_code=301)

    def test_temporary_redirect(self):
        "GET a URL that does a non-permanent redirect"
        response = self.client.get('/temporary_redirect_view/')
        # Check that the response was a 302 (non-permanent redirect)
        self.assertRedirects(response, '/get_view/', status_code=302)

    def test_redirect_to_strange_location(self):
        "GET a URL that redirects to a non-200 page"
        response = self.client.get('/double_redirect_view/')
        # Check that the response was a 302, and that
        # the attempt to get the redirection location returned 301 when retrieved
        self.assertRedirects(response, '/permanent_redirect_view/', target_status_code=301)

    def test_follow_redirect(self):
        "A URL that redirects can be followed to termination."
        response = self.client.get('/double_redirect_view/', follow=True)
        self.assertRedirects(response, '/get_view/', status_code=302, target_status_code=200)
        self.assertEqual(len(response.redirect_chain), 2)

    def test_redirect_http(self):
        "GET a URL that redirects to an http URI"
        response = self.client.get('/http_redirect_view/', follow=True)
        self.assertFalse(response.test_was_secure_request)

    def test_redirect_https(self):
        "GET a URL that redirects to an https URI"
        response = self.client.get('/https_redirect_view/', follow=True)
        self.assertTrue(response.test_was_secure_request)

    def test_notfound_response(self):
        "GET a URL that responds as '404:Not Found'"
        response = self.client.get('/bad_view/')
        # Check that the response was a 404, and that the content contains MAGIC
        self.assertContains(response, 'MAGIC', status_code=404)

    def test_valid_form(self):
        "POST valid data to a form"
        post_data = {
            'text': 'Hello World',
            'email': 'foo@example.com',
            'value': 37,
            'single': 'b',
            'multi': ('b', 'c', 'e')
        }
        response = self.client.post('/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Valid POST Template")

    def test_valid_form_with_hints(self):
        "GET a form, providing hints in the GET data"
        hints = {
            'text': 'Hello World',
            'multi': ('b', 'c', 'e')
        }
        response = self.client.get('/form_view/', data=hints)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Form GET Template")
        # Check that the multi-value data has been rolled out ok
        self.assertContains(response, 'Select a valid choice.', 0)

    def test_incomplete_data_form(self):
        "POST incomplete data to a form"
        post_data = {
            'text': 'Hello World',
            'value': 37
        }
        response = self.client.post('/form_view/', post_data)
        self.assertContains(response, 'This field is required.', 3)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        self.assertFormError(response, 'form', 'email', 'This field is required.')
        self.assertFormError(response, 'form', 'single', 'This field is required.')
        self.assertFormError(response, 'form', 'multi', 'This field is required.')

    def test_form_error(self):
        "POST erroneous data to a form"
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b', 'c', 'e')
        }
        response = self.client.post('/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')

    def test_valid_form_with_template(self):
        "POST valid data to a form using multiple templates"
        post_data = {
            'text': 'Hello World',
            'email': 'foo@example.com',
            'value': 37,
            'single': 'b',
            'multi': ('b', 'c', 'e')
        }
        response = self.client.post('/form_view_with_template/', post_data)
        self.assertContains(response, 'POST data OK')
        self.assertTemplateUsed(response, "form_view.html")
        self.assertTemplateUsed(response, 'base.html')
        self.assertTemplateNotUsed(response, "Valid POST Template")

    def test_incomplete_data_form_with_template(self):
        "POST incomplete data to a form using multiple templates"
        post_data = {
            'text': 'Hello World',
            'value': 37
        }
        response = self.client.post('/form_view_with_template/', post_data)
        self.assertContains(response, 'POST data has errors')
        self.assertTemplateUsed(response, 'form_view.html')
        self.assertTemplateUsed(response, 'base.html')
        self.assertTemplateNotUsed(response, "Invalid POST Template")
        self.assertFormError(response, 'form', 'email', 'This field is required.')
        self.assertFormError(response, 'form', 'single', 'This field is required.')
        self.assertFormError(response, 'form', 'multi', 'This field is required.')

    def test_form_error_with_template(self):
        "POST erroneous data to a form using multiple templates"
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b', 'c', 'e')
        }
        response = self.client.post('/form_view_with_template/', post_data)
        self.assertContains(response, 'POST data has errors')
        self.assertTemplateUsed(response, "form_view.html")
        self.assertTemplateUsed(response, 'base.html')
        self.assertTemplateNotUsed(response, "Invalid POST Template")
        self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')

    def test_unknown_page(self):
        "GET an invalid URL"
        response = self.client.get('/unknown_view/')
        # Check that the response was a 404
        self.assertEqual(response.status_code, 404)

    def test_url_parameters(self):
        "Make sure that URL ;-parameters are not stripped."
        response = self.client.get('/unknown_view/;some-parameter')
        # Check that the path in the response includes it (ignore that it's a 404)
        self.assertEqual(response.request['PATH_INFO'], '/unknown_view/;some-parameter')

    def test_view_with_login(self):
        "Request a page that is protected with @login_required"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')

    def test_view_with_force_login(self):
        "Request a page that is protected with @login_required"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
        # Log in
        self.client.force_login(self.u1)
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')

    def test_view_with_method_login(self):
        "Request a page that is protected with a @login_required method"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_method_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Request a page that requires a login
        response = self.client.get('/login_protected_method_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')

    def test_view_with_method_force_login(self):
        "Request a page that is protected with a @login_required method"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_method_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')
        # Log in
        self.client.force_login(self.u1)
        # Request a page that requires a login
        response = self.client.get('/login_protected_method_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')

    def test_view_with_login_and_custom_redirect(self):
        "Request a page that is protected with @login_required(redirect_field_name='redirect_to')"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_view_custom_redirect/')
        self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Request a page that requires a login
        response = self.client.get('/login_protected_view_custom_redirect/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')

    def test_view_with_force_login_and_custom_redirect(self):
        """
        Request a page that is protected with
        @login_required(redirect_field_name='redirect_to')
        """
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_view_custom_redirect/')
        self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
        # Log in
        self.client.force_login(self.u1)
        # Request a page that requires a login
        response = self.client.get('/login_protected_view_custom_redirect/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')

    def test_view_with_bad_login(self):
        "Request a page that is protected with @login, but use bad credentials"
        login = self.client.login(username='otheruser', password='nopassword')
        self.assertFalse(login)

    def test_view_with_inactive_login(self):
        "Request a page that is protected with @login, but use an inactive login"
        login = self.client.login(username='inactive', password='password')
        self.assertFalse(login)

    def test_view_with_inactive_force_login(self):
        "Request a page that is protected with @login, but use an inactive login"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
        # Log in
        self.client.force_login(self.u2)
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'inactive')

    def test_logout(self):
        "Request a logout after logging in"
        # Log in
        self.client.login(username='testclient', password='password')
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
        # Log out
        self.client.logout()
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')

    def test_logout_with_force_login(self):
        "Request a logout after logging in"
        # Log in
        self.client.force_login(self.u1)
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
        # Log out
        self.client.logout()
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')

    @override_settings(
        AUTHENTICATION_BACKENDS=[
            'django.contrib.auth.backends.ModelBackend',
            'test_client.auth_backends.TestClientBackend',
        ],
    )
    def test_force_login_with_backend(self):
        """
        Request a page that is protected with @login_required when using
        force_login() and passing a backend.
        """
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/login_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
        # Log in
        self.client.force_login(self.u1, backend='test_client.auth_backends.TestClientBackend')
        self.assertEqual(self.u1.backend, 'test_client.auth_backends.TestClientBackend')
        # Request a page that requires a login
        response = self.client.get('/login_protected_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')

    @override_settings(SESSION_ENGINE="django.contrib.sessions.backends.signed_cookies")
    def test_logout_cookie_sessions(self):
        # Re-run the logout scenario with cookie-backed sessions.
        self.test_logout()

    def test_view_with_permissions(self):
        "Request a page that is protected with @permission_required"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/permission_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Log in with wrong permissions. Should result in 302.
        response = self.client.get('/permission_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
        # TODO: Log in with right permissions and request the page again

    def test_view_with_permissions_exception(self):
        "Request a page that is protected with @permission_required but raises an exception"
        # Get the page without logging in. Should result in 403.
        response = self.client.get('/permission_protected_view_exception/')
        self.assertEqual(response.status_code, 403)
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Log in with wrong permissions. Should result in 403.
        response = self.client.get('/permission_protected_view_exception/')
        self.assertEqual(response.status_code, 403)

    def test_view_with_method_permissions(self):
        "Request a page that is protected with a @permission_required method"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/permission_protected_method_view/')
        self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Log in with wrong permissions. Should result in 302.
        response = self.client.get('/permission_protected_method_view/')
        self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
        # TODO: Log in with right permissions and request the page again

    def test_external_redirect(self):
        # Redirects to an external host are asserted without fetching them.
        response = self.client.get('/django_project_redirect/')
        self.assertRedirects(response, 'https://www.djangoproject.com/', fetch_redirect_response=False)

    def test_session_modifying_view(self):
        "Request a page that modifies the session"
        # Session value isn't set initially
        try:
            self.client.session['tobacconist']
            self.fail("Shouldn't have a session value")
        except KeyError:
            pass
        self.client.post('/session_view/')
        # Check that the session was modified
        self.assertEqual(self.client.session['tobacconist'], 'hovercraft')

    def test_view_with_exception(self):
        "Request a page that is known to throw an error"
        self.assertRaises(KeyError, self.client.get, "/broken_view/")
        # Try the same assertion, a different way
        try:
            self.client.get('/broken_view/')
            self.fail('Should raise an error')
        except KeyError:
            pass

    def test_mail_sending(self):
        "Test that mail is redirected to a dummy outbox during test setup"
        response = self.client.get('/mail_sending_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Test message')
        self.assertEqual(mail.outbox[0].body, 'This is a test email')
        self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
        self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
        self.assertEqual(mail.outbox[0].to[1], 'second@example.com')

    def test_mass_mail_sending(self):
        "Test that mass mail is redirected to a dummy outbox during test setup"
        response = self.client.get('/mass_mail_sending_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(mail.outbox), 2)
        self.assertEqual(mail.outbox[0].subject, 'First Test message')
        self.assertEqual(mail.outbox[0].body, 'This is the first test email')
        self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
        self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
        self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
        self.assertEqual(mail.outbox[1].subject, 'Second Test message')
        self.assertEqual(mail.outbox[1].body, 'This is the second test email')
        self.assertEqual(mail.outbox[1].from_email, 'from@example.com')
        self.assertEqual(mail.outbox[1].to[0], 'second@example.com')
        self.assertEqual(mail.outbox[1].to[1], 'third@example.com')
@override_settings(
    MIDDLEWARE_CLASSES=['django.middleware.csrf.CsrfViewMiddleware'],
    ROOT_URLCONF='test_client.urls',
)
class CSRFEnabledClientTests(SimpleTestCase):
    """Verify the opt-in CSRF enforcement of the test client."""

    def test_csrf_enabled_client(self):
        "A client can be instantiated with CSRF checks enabled"
        enforcing_client = Client(enforce_csrf_checks=True)
        # The default test client lets a token-less POST through...
        lenient_response = self.client.post('/post_view/', {})
        self.assertEqual(lenient_response.status_code, 200)
        # ...whereas the CSRF-enforcing client rejects the same request.
        strict_response = enforcing_client.post('/post_view/', {})
        self.assertEqual(strict_response.status_code, 403)
class CustomTestClient(Client):
    # Marker attribute: CustomTestClientTest checks for its presence to
    # verify that ``client_class`` is honoured by the test case machinery.
    i_am_customized = "Yes"
class CustomTestClientTest(SimpleTestCase):
    """Verify that a test case can substitute its own client class."""

    # The test machinery instantiates ``self.client`` from this class.
    client_class = CustomTestClient

    def test_custom_test_client(self):
        """A test case can specify a custom class for self.client."""
        # assertTrue(hasattr(...)) is the idiomatic form; the original
        # assertEqual(hasattr(...), True) obscured the intent.
        self.assertTrue(hasattr(self.client, "i_am_customized"))
_generic_view = lambda request: HttpResponse(status=200)
@override_settings(ROOT_URLCONF='test_client.urls')
class RequestFactoryTest(SimpleTestCase):
    """Tests for the request factory."""

    # A mapping between names of HTTP/1.1 methods and their test views.
    http_methods_and_views = (
        ('get', get_view),
        ('post', post_view),
        ('put', _generic_view),
        ('patch', _generic_view),
        ('delete', _generic_view),
        ('head', _generic_view),
        ('options', _generic_view),
        ('trace', trace_view),
    )

    def setUp(self):
        self.request_factory = RequestFactory()

    def test_request_factory(self):
        """The request factory implements all the HTTP/1.1 methods."""
        for verb, view_func in self.http_methods_and_views:
            factory_method = getattr(self.request_factory, verb)
            response = view_func(factory_method('/somewhere/'))
            self.assertEqual(response.status_code, 200)

    def test_get_request_from_factory(self):
        """
        The request factory returns a templated response for a GET request.
        """
        response = get_view(self.request_factory.get('/somewhere/'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'This is a test')

    def test_trace_request_from_factory(self):
        """The request factory returns an echo response for a TRACE request."""
        url_path = '/somewhere/'
        request = self.request_factory.trace(url_path)
        response = trace_view(request)
        echoed_request_line = "TRACE {} {}".format(
            url_path, request.META["SERVER_PROTOCOL"])
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, echoed_request_line)
| bsd-3-clause |
affo/nova | nova/openstack/common/report/generators/conf.py | 4 | 1420 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides OpenStack config generators
This module defines a class for configuration
generators for generating the model in
:mod:`openstack.common.report.models.conf`.
"""
from oslo_config import cfg
from nova.openstack.common.report.models import conf as cm
class ConfigReportGenerator(object):
    """Generate a report model from OpenStack configuration data.

    Calling an instance produces a
    :class:`openstack.common.report.models.conf.ConfigModel` built from the
    configuration object supplied at construction time — by default the
    global :attr:`oslo.config.cfg.CONF`, where OpenStack stores all of its
    options.

    :param cnf: the configuration option object
    :type cnf: :class:`oslo.config.cfg.ConfigOpts`
    """

    def __init__(self, cnf=cfg.CONF):
        # Keep a reference to the config object; the model is built lazily
        # on each call so it always reflects the current option values.
        self.conf_obj = cnf

    def __call__(self):
        return cm.ConfigModel(self.conf_obj)
| apache-2.0 |
OCA/stock-logistics-reporting | stock_analysis/report/stock_analysis.py | 1 | 1858 | # © 2016 Lorenzo Battistini - Agile Business Group
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import tools
from odoo import models, fields, api
class StockAnalysis(models.Model):
    """Read-only reporting model backed by a SQL view over stock.quant."""

    _name = 'stock.analysis'
    # _auto = False: no table is created by the ORM; init() builds a view.
    _auto = False
    _rec_name = 'product_id'
    _description = 'Stock analysis view'

    # All fields are readonly: rows come straight from the view below.
    product_id = fields.Many2one(
        'product.product', string='Product', readonly=True)
    location_id = fields.Many2one(
        'stock.location', string='Location', readonly=True)
    quantity = fields.Float(string='Quantity', readonly=True)
    lot_id = fields.Many2one(
        'stock.production.lot', string='Lot', readonly=True)
    package_id = fields.Many2one(
        'stock.quant.package', string='Package', readonly=True)
    in_date = fields.Datetime('Incoming Date', readonly=True)
    categ_id = fields.Many2one(
        'product.category', string='Category', readonly=True)
    company_id = fields.Many2one(
        'res.company', string='Company', readonly=True)

    @api.model_cr
    def init(self):
        # (Re)create the backing SQL view on module install/upgrade.  The
        # category comes from product_template, reached via product_product.
        tools.drop_view_if_exists(self.env.cr, self._table)
        self.env.cr.execute(
            """CREATE or REPLACE VIEW %s as (
                SELECT
                    quant.id AS id,
                    quant.product_id AS product_id,
                    quant.location_id AS location_id,
                    quant.quantity AS quantity,
                    quant.lot_id AS lot_id,
                    quant.package_id AS package_id,
                    quant.in_date AS in_date,
                    quant.company_id,
                    template.categ_id AS categ_id
                FROM stock_quant AS quant
                JOIN product_product prod ON prod.id = quant.product_id
                JOIN product_template template
                    ON template.id = prod.product_tmpl_id
            )"""
            % (self._table)
        )
| agpl-3.0 |
p0psicles/SickRage | lib/sqlalchemy/testing/plugin/pytestplugin.py | 76 | 4379 | import pytest
import argparse
import inspect
from . import plugin_base
import collections
def pytest_addoption(parser):
    """Register SQLAlchemy's command-line options with pytest."""
    sqla_group = parser.getgroup("sqlalchemy")

    def make_option(name, **kw):
        # plugin_base supplies optparse-style "callback" kwargs; adapt them
        # to argparse by synthesizing an Action class invoking the callback.
        callback_fn = kw.pop("callback", None)
        if callback_fn:
            class CallableAction(argparse.Action):
                def __call__(self, parser, namespace, values,
                             option_string=None):
                    callback_fn(option_string, values, parser)
            kw["action"] = CallableAction
        sqla_group.addoption(name, **kw)

    plugin_base.setup_options(make_option)
    plugin_base.read_config()
def pytest_configure(config):
    """pytest hook: run SQLAlchemy's pre/post configuration phases in order."""
    plugin_base.pre_begin(config.option)
    # Record whether pytest-cov is active so the suite can adapt.
    plugin_base.set_coverage_flag(bool(getattr(config.option, "cov_source", False)))
    plugin_base.post_begin()
def pytest_collection_modifyitems(session, config, items):
    """pytest hook: replace per-backend test classes with expanded variants."""
    # look for all those classes that specify __backend__ and
    # expand them out into per-database test cases.
    # this is much easier to do within pytest_pycollect_makeitem, however
    # pytest is iterating through cls.__dict__ as makeitem is
    # called which causes a "dictionary changed size" error on py3k.
    # I'd submit a pullreq for them to turn it into a list first, but
    # it's to suit the rather odd use case here which is that we are adding
    # new classes to a module on the fly.
    rebuilt_items = collections.defaultdict(list)
    test_classes = set(item.parent for item in items)
    for test_class in test_classes:
        for sub_cls in plugin_base.generate_sub_tests(test_class.cls, test_class.parent.module):
            if sub_cls is not test_class.cls:
                # Collect the generated subclass's items in place of the
                # original class's items.
                list_ = rebuilt_items[test_class.cls]
                for inst in pytest.Class(sub_cls.__name__,
                                         parent=test_class.parent.parent).collect():
                    list_.extend(inst.collect())
    newitems = []
    for item in items:
        if item.parent.cls in rebuilt_items:
            # Substitute the expanded items exactly once per original class.
            newitems.extend(rebuilt_items[item.parent.cls])
            rebuilt_items[item.parent.cls][:] = []
        else:
            newitems.append(item)
    # seems like the functions attached to a test class aren't sorted already?
    # is that true and why's that? (when using unittest, they're sorted)
    items[:] = sorted(newitems, key=lambda item: (
        item.parent.parent.parent.name,
        item.parent.parent.name,
        item.name
    )
    )
def pytest_pycollect_makeitem(collector, name, obj):
    """pytest hook: build collection items for the objects SQLAlchemy wants."""
    wanted_class = inspect.isclass(obj) and plugin_base.want_class(obj)
    if wanted_class:
        return pytest.Class(name, parent=collector)
    wanted_function = (
        inspect.isfunction(obj)
        and name.startswith("test_")
        and isinstance(collector, pytest.Instance)
    )
    if wanted_function:
        return pytest.Function(name, parent=collector)
    return []
# Tracks the class whose class-level setup most recently ran, so that
# pytest_runtest_setup can detect class boundaries itself.
_current_class = None


def pytest_runtest_setup(item):
    """pytest hook: run per-test setup, plus lazy per-class setup/teardown."""
    # here we seem to get called only based on what we collected
    # in pytest_collection_modifyitems. So to do class-based stuff
    # we have to tear that out.
    global _current_class
    if not isinstance(item, pytest.Function):
        return
    # ... so we're doing a little dance here to figure it out...
    if item.parent.parent is not _current_class:
        class_setup(item.parent.parent)
        _current_class = item.parent.parent
        # this is needed for the class-level, to ensure that the
        # teardown runs after the class is completed with its own
        # class-level teardown...
        item.parent.parent.addfinalizer(lambda: class_teardown(item.parent.parent))
    test_setup(item)
def pytest_runtest_teardown(item):
    """Per-test teardown hook; delegates to plugin_base via test_teardown()."""
    # ...but this works better as the hook here rather than
    # using a finalizer, as the finalizer seems to get in the way
    # of the test reporting failures correctly (you get a bunch of
    # py.test assertion stuff instead)
    test_teardown(item)
def test_setup(item):
    # Delegate per-test setup to the shared plugin_base implementation.
    plugin_base.before_test(item,
        item.parent.module.__name__, item.parent.cls, item.name)


def test_teardown(item):
    # Delegate per-test teardown to plugin_base.
    plugin_base.after_test(item)


def class_setup(item):
    # item is the pytest.Class node; run class-scoped setup.
    plugin_base.start_test_class(item.cls)


def class_teardown(item):
    # item is the pytest.Class node; run class-scoped teardown.
    plugin_base.stop_test_class(item.cls)
| gpl-3.0 |
livepy/scrapy | extras/qps-bench-server.py | 178 | 1640 | #!/usr/bin/env python
from __future__ import print_function
from time import time
from collections import deque
from twisted.web.server import Site, NOT_DONE_YET
from twisted.web.resource import Resource
from twisted.internet import reactor
class Root(Resource):
    """Twisted web resource that measures and reports request throughput.

    Keeps a rolling window of the last 100 inter-request deltas and prints
    samplesize / concurrency / QPS roughly every 3 seconds. An optional
    ``latency`` query argument delays the response to simulate a slow server.
    """

    def __init__(self):
        Resource.__init__(self)
        self.concurrent = 0
        self.tail = deque(maxlen=100)
        self._reset_stats()

    def _reset_stats(self):
        """Clear the rolling window and reset all timing marks to now."""
        self.tail.clear()
        self.start = self.lastmark = self.lasttime = time()

    def getChild(self, name, request):
        # Twisted's IResource.getChild is called as (path, request); the
        # parameters were previously declared in the wrong order. Both are
        # unused: every path serves this same resource.
        return self

    def render(self, request):
        now = time()
        delta = now - self.lasttime

        # reset stats on high inter-request times caused by client restarts
        if delta > 3:  # seconds
            self._reset_stats()
            return ''

        self.tail.appendleft(delta)
        self.lasttime = now
        self.concurrent += 1

        # Print a stats line at most every 3 seconds.
        if now - self.lastmark >= 3:
            self.lastmark = now
            qps = len(self.tail) / sum(self.tail)
            print('samplesize={0} concurrent={1} qps={2:0.2f}'.format(len(self.tail), self.concurrent, qps))

        if 'latency' in request.args:
            # Simulate server-side latency, finishing the request later.
            latency = float(request.args['latency'][0])
            reactor.callLater(latency, self._finish, request)
            return NOT_DONE_YET

        self.concurrent -= 1
        return ''

    def _finish(self, request):
        """Complete a delayed request, guarding against client disconnects."""
        self.concurrent -= 1
        if not request.finished and not request._disconnected:
            request.finish()
# Serve the benchmark resource on all interfaces, port 8880, and run
# the reactor until interrupted.
root = Root()
factory = Site(root)
reactor.listenTCP(8880, factory)
reactor.run()
| bsd-3-clause |
illfelder/compute-image-packages | packages/python-google-compute-engine/google_compute_engine/config_manager.py | 6 | 3753 | #!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library for retrieving and modifying configuration settings."""
import os
import textwrap
from google_compute_engine import constants
from google_compute_engine import file_utils
from google_compute_engine.compat import parser
CONFIG = constants.SYSCONFDIR + '/instance_configs.cfg'
class ConfigManager(object):
    """Reads, queries, updates, and persists instance configuration defaults."""

    def __init__(self, config_file=None, config_header=None):
        """Constructor.

        Args:
          config_file: string, the location of the config file.
          config_header: string, the message to write at the top of the config.
        """
        self.config_file = config_file or CONFIG
        self.config_header = config_header
        self.config = parser.Parser()
        self.config.read(self.config_file)

    def _AddHeader(self, fp):
        """Write the configured header as '#'-prefixed comment lines.

        Args:
          fp: file object, open for writing.
        """
        wrapped = textwrap.wrap(
            textwrap.dedent(self.config_header), break_on_hyphens=False)
        commented = ['# ' + line for line in wrapped]
        fp.write('\n'.join(commented))
        fp.write('\n\n')

    def GetOptionString(self, section, option):
        """Get the string value of an option in the config file.

        Args:
          section: string, the section of the config file to check.
          option: string, the option to retrieve the value of.

        Returns:
          string, the value of the option or None if the option doesn't exist.
        """
        if not self.config.has_option(section, option):
            return None
        return self.config.get(section, option)

    def GetOptionBool(self, section, option):
        """Get the boolean value of an option in the config file.

        Args:
          section: string, the section of the config file to check.
          option: string, the option to retrieve the value of.

        Returns:
          bool, True if the option is enabled or not set.
        """
        if self.config.has_option(section, option):
            return self.config.getboolean(section, option)
        # Unset options default to enabled.
        return True

    def SetOption(self, section, option, value, overwrite=True):
        """Set the value of an option, creating the section on demand.

        Args:
          section: string, the section of the config file to check.
          option: string, the option to set the value of.
          value: string, the value to set the option.
          overwrite: bool, True to overwrite an existing value in the config file.
        """
        if self.config.has_option(section, option) and not overwrite:
            return
        if not self.config.has_section(section):
            self.config.add_section(section)
        self.config.set(section, option, str(value))

    def WriteConfig(self, config_file=None):
        """Persist the current config values to disk under an exclusive lock.

        Args:
          config_file: string, the file location of the config file to write.
        """
        destination = config_file or self.config_file
        base_name = os.path.splitext(os.path.basename(destination))[0]
        lock_path = (
            '%s/lock/google_%s.lock' % (constants.LOCALSTATEDIR, base_name))
        with file_utils.LockFile(lock_path):
            with open(destination, 'w') as config_fp:
                if self.config_header:
                    self._AddHeader(config_fp)
                self.config.write(config_fp)
| apache-2.0 |
miguelpalacio/python-for-android | python3-alpha/python-libs/gdata/tlslite/utils/Cryptlib_TripleDES.py | 48 | 1410 | """Cryptlib 3DES implementation."""
from .cryptomath import *
from .TripleDES import *
# Only expose this backend when the optional cryptlib python bindings loaded.
if cryptlibpyLoaded:

    def new(key, mode, IV):
        # Factory with the same signature as the other 3DES backend modules.
        return Cryptlib_TripleDES(key, mode, IV)

    class Cryptlib_TripleDES(TripleDES):
        # 3DES-CBC wrapper around a native cryptlib encryption context.

        def __init__(self, key, mode, IV):
            TripleDES.__init__(self, key, mode, IV, "cryptlib")
            # Create a 3DES context, then configure CBC mode, key size,
            # key material and IV on it.
            self.context = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_3DES)
            cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_MODE, cryptlib_py.CRYPT_MODE_CBC)
            cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_KEYSIZE, len(key))
            cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_KEY, key)
            cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_IV, IV)

        def __del__(self):
            # Release the native context when this wrapper is collected.
            cryptlib_py.cryptDestroyContext(self.context)

        def encrypt(self, plaintext):
            # Delegate to the base class first (behavior defined in TripleDES).
            TripleDES.encrypt(self, plaintext)
            bytes = stringToBytes(plaintext)
            # cryptEncrypt operates in place on the mutable byte array.
            cryptlib_py.cryptEncrypt(self.context, bytes)
            return bytesToString(bytes)

        def decrypt(self, ciphertext):
            # Delegate to the base class first (behavior defined in TripleDES).
            TripleDES.decrypt(self, ciphertext)
            bytes = stringToBytes(ciphertext)
            # cryptDecrypt operates in place on the mutable byte array.
            cryptlib_py.cryptDecrypt(self.context, bytes)
return bytesToString(bytes) | apache-2.0 |
ruleant/weblate | weblate/accounts/avatar.py | 1 | 5230 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2014 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import urllib2
import urllib
import hashlib
import os.path
from django.core.cache import get_cache, InvalidCacheBackendError
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import pgettext
from django.core.urlresolvers import reverse
from django.conf import settings
import weblate
from weblate import appsettings
try:
import libravatar # pylint: disable=import-error
HAS_LIBRAVATAR = True
except ImportError:
HAS_LIBRAVATAR = False
PLURAL_SEPARATOR = '\x1e\x1e'
def avatar_for_email(email, size=80):
    """
    Generates an avatar URL for the given e-mail address, caching the
    result for a day.
    """
    # Blank e-mails fall back to the project-wide placeholder address.
    if email == '':
        email = 'noreply@weblate.org'

    cache_key = 'avatar-{0}-{1}'.format(email, size)
    cache = get_cache('default')
    cached = cache.get(cache_key)
    if cached is not None:
        return cached

    if HAS_LIBRAVATAR:
        # Use libravatar library if available
        url = libravatar.libravatar_url(
            email=email,
            https=True,
            default=appsettings.AVATAR_DEFAULT_IMAGE,
            size=size
        )
    else:
        # Fallback: build a gravatar-style URL from the md5 of the e-mail.
        params = urllib.urlencode({
            's': str(size),
            'd': appsettings.AVATAR_DEFAULT_IMAGE
        })
        url = "{0}avatar/{1}?{2}".format(
            appsettings.AVATAR_URL_PREFIX,
            hashlib.md5(email.lower()).hexdigest(),
            params
        )

    cache.set(cache_key, url, 3600 * 24)
    return url
def get_fallback_avatar_url(size):
    """
    Returns URL of the bundled fallback avatar for the given size.
    """
    filename = 'weblate-{0}.png'.format(size)
    return os.path.join(settings.MEDIA_URL, filename)
def get_fallback_avatar(size):
    """
    Returns the bundled fallback avatar image (PNG) as raw bytes.
    """
    fallback = os.path.join(
        appsettings.WEB_ROOT,
        'media/weblate-{0}.png'.format(size)
    )
    # Open in binary mode: the file is a PNG image; text mode would corrupt
    # the data on platforms that perform newline translation.
    with open(fallback, 'rb') as handle:
        return handle.read()
def get_avatar_image(user, size):
    """
    Returns avatar image from cache (if available) or downloads it,
    falling back to the bundled image when the download fails.
    """
    cache_key = u'avatar-img-{0}-{1}'.format(
        user.username,
        size
    )

    # Prefer a dedicated avatar cache when one is configured.
    try:
        cache = get_cache('avatar')
    except InvalidCacheBackendError:
        cache = get_cache('default')

    image = cache.get(cache_key)
    if image is not None:
        return image

    try:
        image = download_avatar_image(user, size)
        cache.set(cache_key, image)
    except IOError as error:
        # Log the failure and serve the bundled fallback instead.
        weblate.logger.error(
            'Failed to fetch avatar for %s: %s',
            user.username,
            str(error)
        )
        return get_fallback_avatar(size)

    return image
def download_avatar_image(user, size):
    """
    Downloads avatar image from remote server.

    Raises urllib2's IOError subclasses on network failure; callers are
    expected to handle IOError.
    """
    url = avatar_for_email(user.email, size)
    request = urllib2.Request(url)
    request.add_header('User-Agent', weblate.USER_AGENT)

    # Fire request. The timeout must be passed to urlopen(); assigning a
    # .timeout attribute on the Request object has no effect and meant the
    # previous code could block indefinitely.
    handle = urllib2.urlopen(request, timeout=0.5)

    # Read and possibly convert response
    return handle.read()
def get_user_display(user, icon=True, link=False):
    """
    Nicely formats user for display, optionally with avatar icon and
    profile link.
    """
    if user is None:
        # None user, probably a remotely triggered action.
        full_name = pgettext('No known user', 'None')
    else:
        full_name = user.first_name
        if full_name.strip() == '':
            # Fall back to the login name when no full name is set.
            full_name = user.username

    # Escape HTML before embedding in markup below.
    full_name = escape(full_name)

    if icon and appsettings.ENABLE_AVATARS:
        if user is None or user.email == 'noreply@weblate.org':
            avatar = get_fallback_avatar_url(32)
        else:
            avatar = reverse(
                'user_avatar', kwargs={'user': user.username, 'size': 32}
            )
        full_name = u'<img src="{avatar}" class="avatar" /> {name}'.format(
            name=full_name,
            avatar=avatar
        )

    if link and user is not None:
        return mark_safe(u'<a href="{link}">{name}</a>'.format(
            name=full_name,
            link=reverse('user_page', kwargs={'user': user.username}),
        ))
    return mark_safe(full_name)
| gpl-3.0 |
roadmapper/ansible | lib/ansible/plugins/action/pause.py | 69 | 10469 | # Copyright 2012, Tim Bielawa <tbielawa@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import signal
import sys
import termios
import time
import tty
from os import isatty
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import PY3
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
try:
    import curses

    # Nest the try except since curses.error is not available if curses did not import
    try:
        curses.setupterm()
        HAS_CURSES = True
    except curses.error:
        HAS_CURSES = False
except ImportError:
    HAS_CURSES = False

if HAS_CURSES:
    # Terminal capability strings: carriage-return and clear-to-end-of-line.
    MOVE_TO_BOL = curses.tigetstr('cr')
    CLEAR_TO_EOL = curses.tigetstr('el')
else:
    # Hard-coded ANSI fallbacks when the terminal could not be queried.
    MOVE_TO_BOL = b'\r'
    CLEAR_TO_EOL = b'\x1b[K'
class AnsibleTimeoutExceeded(Exception):
    """Raised by the SIGALRM handler when the pause duration expires."""
    pass
def timeout_handler(signum, frame):
    # SIGALRM handler: convert the signal into a catchable exception.
    raise AnsibleTimeoutExceeded
def clear_line(stdout):
    """Erase the current terminal line: move to column 0, then clear to EOL.

    MOVE_TO_BOL and CLEAR_TO_EOL are already complete control sequences
    (either obtained from curses.tigetstr or the hard-coded fallbacks), so
    they must be written as-is; the previous code prefixed each with an
    extra CSI introducer (b'\\x1b['), emitting stray bytes to the terminal.
    """
    stdout.write(MOVE_TO_BOL)
    stdout.write(CLEAR_TO_EOL)
class ActionModule(ActionBase):
    ''' pauses execution for a length or time, or until input is received '''

    BYPASS_HOST_LOOP = True
    _VALID_ARGS = frozenset(('echo', 'minutes', 'prompt', 'seconds'))

    def run(self, tmp=None, task_vars=None):
        ''' run the pause action module '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        duration_unit = 'minutes'
        prompt = None
        seconds = None
        echo = True
        echo_prompt = ''
        result.update(dict(
            changed=False,
            rc=0,
            stderr='',
            stdout='',
            start=None,
            stop=None,
            delta=None,
            echo=echo
        ))

        # Should keystrokes be echoed to stdout?
        if 'echo' in self._task.args:
            try:
                echo = boolean(self._task.args['echo'])
            except TypeError as e:
                result['failed'] = True
                result['msg'] = to_native(e)
                return result

            # Fix: keep the reported value in sync with the parsed argument;
            # previously result['echo'] always stayed True even for echo=False.
            result['echo'] = echo

            # Add a note saying the output is hidden if echo is disabled
            if not echo:
                echo_prompt = ' (output is hidden)'

        # Build the prompt: user-supplied text, or a default message.
        if 'prompt' in self._task.args:
            prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), self._task.args['prompt'], echo_prompt)
        else:
            # If no custom prompt is specified, set a default prompt
            prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), 'Press enter to continue, Ctrl+C to interrupt', echo_prompt)

        # Are 'minutes' or 'seconds' keys that exist in 'args'?
        if 'minutes' in self._task.args or 'seconds' in self._task.args:
            try:
                if 'minutes' in self._task.args:
                    # The time() command operates in seconds so we need to
                    # recalculate for minutes=X values.
                    seconds = int(self._task.args['minutes']) * 60
                else:
                    seconds = int(self._task.args['seconds'])
                    duration_unit = 'seconds'
            except ValueError as e:
                result['failed'] = True
                result['msg'] = u"non-integer value given for prompt duration:\n%s" % to_text(e)
                return result

        ########################################################################
        # Begin the hard work!

        start = time.time()
        result['start'] = to_text(datetime.datetime.now())
        result['user_input'] = b''

        stdin_fd = None
        old_settings = None
        try:
            if seconds is not None:
                if seconds < 1:
                    seconds = 1

                # setup the alarm handler
                signal.signal(signal.SIGALRM, timeout_handler)
                signal.alarm(seconds)

                # show the timer and control prompts
                display.display("Pausing for %d seconds%s" % (seconds, echo_prompt))
                display.display("(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r")

            # show the prompt specified in the task (the previous code had an
            # if/else here with two identical branches; collapsed to one call)
            display.display(prompt)

            # save the attributes on the existing (duped) stdin so
            # that we can restore them later after we set raw mode
            stdin_fd = None
            stdout_fd = None
            try:
                if PY3:
                    stdin = self._connection._new_stdin.buffer
                    stdout = sys.stdout.buffer
                else:
                    stdin = self._connection._new_stdin
                    stdout = sys.stdout
                stdin_fd = stdin.fileno()
                stdout_fd = stdout.fileno()
            except (ValueError, AttributeError):
                # ValueError: someone is using a closed file descriptor as stdin
                # AttributeError: someone is using a null file descriptor as stdin on windoez
                stdin = None

            if stdin_fd is not None:
                if isatty(stdin_fd):
                    # grab actual Ctrl+C sequence
                    try:
                        intr = termios.tcgetattr(stdin_fd)[6][termios.VINTR]
                    except Exception:
                        # unsupported/not present, use default
                        intr = b'\x03'  # value for Ctrl+C

                    # get backspace sequences
                    try:
                        backspace = termios.tcgetattr(stdin_fd)[6][termios.VERASE]
                    except Exception:
                        backspace = [b'\x7f', b'\x08']

                    old_settings = termios.tcgetattr(stdin_fd)
                    tty.setraw(stdin_fd)

                    # Only set stdout to raw mode if it is a TTY. This is needed when redirecting
                    # stdout to a file since a file cannot be set to raw mode.
                    if isatty(stdout_fd):
                        tty.setraw(stdout_fd)

                    # Only echo input if no timeout is specified
                    if not seconds and echo:
                        new_settings = termios.tcgetattr(stdin_fd)
                        new_settings[3] = new_settings[3] | termios.ECHO
                        termios.tcsetattr(stdin_fd, termios.TCSANOW, new_settings)

                    # flush the buffer to make sure no previous key presses
                    # are read in below
                    termios.tcflush(stdin, termios.TCIFLUSH)

            while True:
                try:
                    if stdin_fd is not None:
                        key_pressed = stdin.read(1)
                        if key_pressed == intr:  # value for Ctrl+C
                            clear_line(stdout)
                            raise KeyboardInterrupt

                    if not seconds:
                        if stdin_fd is None or not isatty(stdin_fd):
                            display.warning("Not waiting for response to prompt as stdin is not interactive")
                            break

                        # read key presses and act accordingly
                        if key_pressed in (b'\r', b'\n'):
                            clear_line(stdout)
                            break
                        elif key_pressed in backspace:
                            # delete a character if backspace is pressed
                            result['user_input'] = result['user_input'][:-1]
                            clear_line(stdout)
                            if echo:
                                stdout.write(result['user_input'])
                            stdout.flush()
                        else:
                            result['user_input'] += key_pressed

                except KeyboardInterrupt:
                    signal.alarm(0)
                    display.display("Press 'C' to continue the play or 'A' to abort \r")
                    if self._c_or_a(stdin):
                        clear_line(stdout)
                        break

                    clear_line(stdout)
                    raise AnsibleError('user requested abort!')

        except AnsibleTimeoutExceeded:
            # this is the exception we expect when the alarm signal
            # fires, so we simply ignore it to move into the cleanup
            pass
        finally:
            # cleanup and save some information
            # restore the old settings for the duped stdin stdin_fd
            if not(None in (stdin_fd, old_settings)) and isatty(stdin_fd):
                termios.tcsetattr(stdin_fd, termios.TCSADRAIN, old_settings)

            duration = time.time() - start
            result['stop'] = to_text(datetime.datetime.now())
            result['delta'] = int(duration)

            if duration_unit == 'minutes':
                duration = round(duration / 60.0, 2)
            else:
                duration = round(duration, 2)
            result['stdout'] = "Paused for %s %s" % (duration, duration_unit)

        result['user_input'] = to_text(result['user_input'], errors='surrogate_or_strict')
        return result

    def _c_or_a(self, stdin):
        # Block until the user answers: 'c' continues the play, 'a' aborts.
        while True:
            key_pressed = stdin.read(1)
            if key_pressed.lower() == b'a':
                return False
            elif key_pressed.lower() == b'c':
                return True
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/scipy/optimize/tests/test_hungarian.py | 19 | 2155 | # Author: Brian M. Clapper, G. Varoquaux, Lars Buitinck
# License: BSD
from numpy.testing import assert_array_equal
from pytest import raises as assert_raises
import numpy as np
from scipy.optimize import linear_sum_assignment
def test_linear_sum_assignment():
    """Check optimal assignments on several square and rectangular matrices."""
    cases = [
        # (cost matrix, costs picked by the optimal assignment)
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         [150, 400, 300]),
        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         [150, 2, 300]),
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         [10, 1, 7]),
        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         [10, 1, 4]),
        # degenerate n == 2, m == 0 matrix
        ([[], []],
         []),
    ]
    for matrix, expected_cost in cases:
        matrix = np.array(matrix)

        # Original orientation: row indices come back sorted.
        rows, cols = linear_sum_assignment(matrix)
        assert_array_equal(rows, np.sort(rows))
        assert_array_equal(expected_cost, matrix[rows, cols])

        # Transposed orientation: the multiset of chosen costs is unchanged.
        matrix = matrix.T
        rows, cols = linear_sum_assignment(matrix)
        assert_array_equal(rows, np.sort(rows))
        assert_array_equal(np.sort(expected_cost),
                           np.sort(matrix[rows, cols]))
def test_linear_sum_assignment_input_validation():
assert_raises(ValueError, linear_sum_assignment, [1, 2, 3])
C = [[1, 2, 3], [4, 5, 6]]
assert_array_equal(linear_sum_assignment(C),
linear_sum_assignment(np.asarray(C)))
assert_array_equal(linear_sum_assignment(C),
linear_sum_assignment(np.matrix(C)))
I = np.identity(3)
assert_array_equal(linear_sum_assignment(I.astype(np.bool)),
linear_sum_assignment(I))
assert_raises(ValueError, linear_sum_assignment, I.astype(str))
I[0][0] = np.nan
assert_raises(ValueError, linear_sum_assignment, I)
I = np.identity(3)
I[1][1] = np.inf
assert_raises(ValueError, linear_sum_assignment, I)
| gpl-3.0 |
michaelrice/gotland | tests/__init__.py | 1 | 1192 | # Copyright 2014 Michael Rice <michael@michaelrice.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
def tests_resource_path(local_path=''):
    """Return an absolute path to *local_path* inside this tests directory."""
    base_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(base_dir, local_path)
# Absolute path to the 'fixtures' directory that ships alongside this module.
fixtures_path = tests_resource_path(local_path='fixtures')
class VCRBasedTests(unittest.TestCase):
    """Base class for tests that replay recorded HTTP interactions.

    Its setUp makes the 'vcr' logger verbose so cassette activity is
    visible while the tests run.
    """

    def setUp(self):
        logging.basicConfig()
        logging.getLogger('vcr').setLevel(logging.DEBUG)
justquick/django-activity-stream | runtests/testapp/migrations/0001_initial.py | 9 | 2259 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the test app: a custom user model (MyUser) plus
    two auxiliary models (Player, Unregistered)."""

    dependencies = [
        # auth must migrate first: MyUser's M2M fields below reference
        # auth.Group and auth.Permission.
        ('auth', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='MyUser',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(unique=True, max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=True)),
                ('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('state', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Unregistered',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| bsd-3-clause |
demianw/dipy | dipy/sims/voxel.py | 4 | 26726 | from __future__ import division
import numpy as np
from numpy import dot
from dipy.core.geometry import sphere2cart
from dipy.core.geometry import vec2vec_rotmat
from dipy.reconst.utils import dki_design_matrix
# Diffusion coefficients for white matter tracts, in mm^2/s
#
# Based roughly on values from:
#
# Pierpaoli, Basser, "Towards a Quantitative Assessment of Diffusion
# Anisotropy", Magnetic Resonance in Medicine, 1996; 36(6):893-906.
#
diffusion_evals = np.array([1500e-6, 400e-6, 400e-6])
def _check_directions(angles):
"""
Helper function to check if direction ground truth have the right format
and are in cartesian coordinates
Parameters
-----------
angles : array (K,2) or (K, 3)
List of K polar angles (in degrees) for the sticks or array of K
sticks as unit vectors.
Returns
--------
sticks : (K,3)
Sticks in cartesian coordinates.
"""
angles = np.array(angles)
if angles.shape[-1] == 3:
sticks = angles
else:
sticks = [sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
for pair in angles]
sticks = np.array(sticks)
return sticks
def _add_gaussian(sig, noise1, noise2):
"""
Helper function to add_noise
This one simply adds one of the Gaussians to the sig and ignores the other
one.
"""
return sig + noise1
def _add_rician(sig, noise1, noise2):
"""
Helper function to add_noise.
This does the same as abs(sig + complex(noise1, noise2))
"""
return np.sqrt((sig + noise1) ** 2 + noise2 ** 2)
def _add_rayleigh(sig, noise1, noise2):
"""
Helper function to add_noise
The Rayleigh distribution is $\sqrt\{Gauss_1^2 + Gauss_2^2}$.
"""
return sig + np.sqrt(noise1 ** 2 + noise2 ** 2)
def add_noise(signal, snr, S0, noise_type='rician'):
    r""" Add noise of specified distribution to the signal from a single voxel.

    Parameters
    -----------
    signal : 1-d ndarray
        The signal in the voxel.
    snr : float
        The desired signal-to-noise ratio (see notes). If None, the signal
        is returned unchanged.
    S0 : float
        Reference signal for specifying `snr`.
    noise_type : string, optional
        Distribution of the added noise: 'gaussian', 'rician' (default) or
        'rayleigh'.

    Returns
    --------
    signal : array, same shape as the input
        Signal with added noise.

    Notes
    -----
    SNR is defined here, following [1]_, as ``S0 / sigma``, where ``sigma``
    is the standard deviation of the two Gaussian distributions forming the
    real and imaginary components of the Rician noise distribution (see
    [2]_).

    References
    ----------
    .. [1] Descoteaux, Angelino, Fitzgibbons and Deriche (2007) Regularized,
           fast and robust q-ball imaging. MRM, 58: 497-510
    .. [2] Gudbjartson and Patz (2008). The Rician distribution of noisy MRI
           data. MRM 34: 910-914.

    Examples
    --------
    >>> signal = np.arange(800).reshape(2, 2, 2, 100)
    >>> signal_w_noise = add_noise(signal, 10., 100., noise_type='rician')
    """
    if snr is None:
        return signal

    sigma = S0 / snr

    dispatch = {'gaussian': _add_gaussian,
                'rician': _add_rician,
                'rayleigh': _add_rayleigh}

    # Draw the Gaussian components; Gaussian noise only needs one draw.
    first_draw = np.random.normal(0, sigma, size=signal.shape)
    if noise_type == 'gaussian':
        second_draw = None
    else:
        second_draw = np.random.normal(0, sigma, size=signal.shape)

    return dispatch[noise_type](signal, first_draw, second_draw)
def sticks_and_ball(gtab, d=0.0015, S0=100, angles=[(0, 0), (90, 0)],
                    fractions=[35, 35], snr=20):
    """ Simulate the signal for a Sticks & Ball model.

    Parameters
    -----------
    gtab : GradientTable
        Signal measurement directions.
    d : float
        Diffusivity value.
    S0 : float
        Unweighted signal value.
    angles : array (K,2) or (K, 3)
        List of K polar angles (in degrees) for the sticks or array of K
        sticks as unit vectors.
    fractions : float
        Percentage of each stick. Remainder to 100 specifies isotropic
        component.
    snr : float
        Signal to noise ratio, assuming Rician noise. If set to None, no
        noise is added.

    Returns
    --------
    S : (N,) ndarray
        Simulated signal.
    sticks : (M,3)
        Sticks in cartesian coordinates.

    References
    ----------
    .. [1] Behrens et al., "Probabilistic diffusion
           tractography with multiple fiber orientations: what can we gain?",
           Neuroimage, 2007.
    """
    # Convert percentages to volume fractions; the remainder to 1 is the
    # isotropic ("ball") compartment.
    fractions = [f / 100. for f in fractions]
    f0 = 1 - np.sum(fractions)
    S = np.zeros(len(gtab.bvals))

    sticks = _check_directions(angles)

    # Index 0 is skipped here (treated as a b0 entry); b0 positions are
    # overwritten with S0 below via gtab.b0s_mask.
    for (i, g) in enumerate(gtab.bvecs[1:]):
        # Isotropic term plus one attenuation term per stick.
        S[i + 1] = f0*np.exp(-gtab.bvals[i + 1]*d) + \
            np.sum([fractions[j]*np.exp(-gtab.bvals[i + 1]*d*np.dot(s, g)**2)
                    for (j, s) in enumerate(sticks)])

        S[i + 1] = S0 * S[i + 1]

    S[gtab.b0s_mask] = S0

    S = add_noise(S, snr, S0)

    return S, sticks
def single_tensor(gtab, S0=1, evals=None, evecs=None, snr=None):
    """ Simulated Q-space signal with a single tensor.

    Parameters
    -----------
    gtab : GradientTable
        Measurement directions.
    S0 : double,
        Strength of signal in the presence of no diffusion gradient (also
        called the ``b=0`` value).
    evals : (3,) ndarray
        Eigenvalues of the diffusion tensor. By default, values typical for
        prolate white matter are used.
    evecs : (3, 3) ndarray
        Eigenvectors of the tensor. You can also think of this as a rotation
        matrix that transforms the direction of the tensor. The eigenvectors
        need to be column wise.
    snr : float
        Signal to noise ratio, assuming Rician noise. None implies no noise.

    Returns
    --------
    S : (N,) ndarray
        Simulated signal: ``S(q, tau) = S_0 e^(-b g^T R D R.T g)``.

    References
    ----------
    .. [1] M. Descoteaux, "High Angular Resolution Diffusion MRI: from Local
           Estimation to Segmentation and Tractography", PhD thesis,
           University of Nice-Sophia Antipolis, p. 42, 2008.
    .. [2] E. Stejskal and J. Tanner, "Spin diffusion measurements: spin echos
           in the presence of a time-dependent field gradient", Journal of
           Chemical Physics, nr. 42, pp. 288--292, 1965.
    """
    if evals is None:
        # Default: typical prolate white-matter eigenvalues (module constant).
        evals = diffusion_evals

    if evecs is None:
        evecs = np.eye(3)

    # Flatten any leading gradient dimensions; restored on return.
    out_shape = gtab.bvecs.shape[:gtab.bvecs.ndim - 1]
    gradients = gtab.bvecs.reshape(-1, 3)

    R = np.asarray(evecs)
    S = np.zeros(len(gradients))
    # Rotate the diagonal eigenvalue matrix into the lab frame: D = R L R^T.
    D = dot(dot(R, np.diag(evals)), R.T)
    for (i, g) in enumerate(gradients):
        # Stejskal-Tanner attenuation: S = S0 * exp(-b g^T D g).
        S[i] = S0 * np.exp(-gtab.bvals[i] * dot(dot(g.T, D), g))

    S = add_noise(S, snr, S0)

    return S.reshape(out_shape)
def multi_tensor(gtab, mevals, S0=100, angles=[(0, 0), (90, 0)],
                 fractions=[50, 50], snr=20):
    r""" Simulate a Multi-Tensor signal.

    Parameters
    -----------
    gtab : GradientTable
    mevals : array (K, 3)
        each tensor's eigenvalues in each row
    S0 : float
        Unweighted signal value (b0 signal).
    angles : array (K,2) or (K,3)
        List of K tensor directions in polar angles (in degrees) or unit
        vectors
    fractions : float
        Percentage of the contribution of each tensor. The sum of fractions
        should be equal to 100%.
    snr : float
        Signal to noise ratio, assuming Rician noise. If set to None, no
        noise is added.

    Returns
    --------
    S : (N,) ndarray
        Simulated signal.
    sticks : (M,3)
        Sticks in cartesian coordinates.

    Examples
    --------
    >>> import numpy as np
    >>> from dipy.sims.voxel import multi_tensor
    >>> from dipy.data import get_data
    >>> from dipy.core.gradients import gradient_table
    >>> from dipy.io.gradients import read_bvals_bvecs
    >>> fimg, fbvals, fbvecs = get_data('small_101D')
    >>> bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    >>> gtab = gradient_table(bvals, bvecs)
    >>> mevals=np.array(([0.0015, 0.0003, 0.0003],[0.0015, 0.0003, 0.0003]))
    >>> e0 = np.array([1, 0, 0.])
    >>> e1 = np.array([0., 1, 0])
    >>> S = multi_tensor(gtab, mevals)
    """
    if np.round(np.sum(fractions), 2) != 100.0:
        raise ValueError('Fractions should sum to 100')

    # Convert percentages to volume fractions.
    fractions = [f / 100. for f in fractions]

    S = np.zeros(len(gtab.bvals))

    sticks = _check_directions(angles)

    # Accumulate each compartment's noiseless single-tensor signal,
    # weighted by its volume fraction; noise is added once at the end.
    for i in range(len(fractions)):
        S = S + fractions[i] * single_tensor(gtab, S0=S0, evals=mevals[i],
                                             evecs=all_tensor_evecs(
                                                 sticks[i]), snr=None)

    return add_noise(S, snr, S0), sticks
def multi_tensor_dki(gtab, mevals, S0=100, angles=((90., 0.), (90., 0.)),
                     fractions=(50, 50), snr=20):
    r""" Simulate the diffusion-weight signal, diffusion and kurtosis tensors
    based on the DKI model

    Parameters
    -----------
    gtab : GradientTable
    mevals : array (K, 3)
        eigenvalues of the diffusion tensor for each individual compartment
    S0 : float (optional)
        Unweighted signal value (b0 signal).
    angles : array (K,2) or (K,3) (optional)
        List of K tensor directions of the diffusion tensor of each
        compartment in polar angles (in degrees) or unit vectors
    fractions : sequence of K floats (optional)
        Percentage of the contribution of each tensor. The sum of fractions
        should be equal to 100%.
    snr : float (optional)
        Signal to noise ratio, assuming Rician noise. If set to None, no
        noise is added.

    Returns
    --------
    S : (N,) ndarray
        Simulated signal based on the DKI model.
    dt : (6,) ndarray
        elements of the diffusion tensor.
    kt : (15,) ndarray
        elements of the kurtosis tensor.

    Notes
    -----
    Simulations are based on multicompartmental models which assume that
    tissue is well described by impermeable diffusion compartments
    characterized by their only diffusion tensor. Since simulations are based
    on the DKI model, coefficients larger than the fourth order of the
    signal's taylor expansion approximation are neglected.

    Examples
    --------
    >>> import numpy as np
    >>> from dipy.sims.voxel import multi_tensor_dki
    >>> from dipy.data import get_data
    >>> from dipy.core.gradients import gradient_table
    >>> from dipy.io.gradients import read_bvals_bvecs
    >>> fimg, fbvals, fbvecs = get_data('small_64D')
    >>> bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    >>> bvals_2s = np.concatenate((bvals, bvals * 2), axis=0)
    >>> bvecs_2s = np.concatenate((bvecs, bvecs), axis=0)
    >>> gtab = gradient_table(bvals_2s, bvecs_2s)
    >>> mevals = np.array([[0.00099, 0, 0],[0.00226, 0.00087, 0.00087]])
    >>> S, dt, kt = multi_tensor_dki(gtab, mevals)

    References
    ----------
    .. [1] R. Neto Henriques et al., "Exploring the 3D geometry of the
           diffusion kurtosis tensor - Impact on the development of robust
           tractography procedures and novel biomarkers", NeuroImage (2015)
           111, 85-99.
    """
    if np.round(np.sum(fractions), 2) != 100.0:
        raise ValueError('Fractions should sum to 100')
    # Convert percentages to volume fractions in [0, 1].
    fractions = [f / 100. for f in fractions]
    S = np.zeros(len(gtab.bvals))
    sticks = _check_directions(angles)
    # 3D matrix containing the individual diffusion tensor of each
    # compartment, rotated onto its stick direction.
    D_comps = np.zeros((len(fractions), 3, 3))
    for i in range(len(fractions)):
        R = all_tensor_evecs(sticks[i])
        D_comps[i] = dot(dot(R, np.diag(mevals[i])), R.T)
    # Voxel's total diffusion tensor: fraction-weighted sum of compartments.
    DT = np.zeros((3, 3))
    for i in range(len(fractions)):
        DT = DT + fractions[i] * D_comps[i]
    dt = np.array([DT[0][0], DT[0][1], DT[1][1], DT[0][2], DT[1][2], DT[2][2]])
    # Voxel's mean diffusivity (trace / 3).
    MD = (DT[0][0] + DT[1][1] + DT[2][2]) / 3
    # The 15 independent kurtosis tensor elements in the standard DKI
    # ordering (same order as the original element-by-element assignments).
    kt_indices = [(0, 0, 0, 0), (1, 1, 1, 1), (2, 2, 2, 2),
                  (0, 0, 0, 1), (0, 0, 0, 2), (0, 1, 1, 1), (1, 1, 1, 2),
                  (0, 2, 2, 2), (1, 2, 2, 2), (0, 0, 1, 1), (0, 0, 2, 2),
                  (1, 1, 2, 2), (0, 0, 1, 2), (0, 1, 1, 2), (0, 1, 2, 2)]
    kt = np.array([kurtosis_element(D_comps, fractions, i, j, k, l, DT, MD)
                   for (i, j, k, l) in kt_indices])
    # Signal predicted by the DKI model from dt and kt.
    S = DKI_signal(gtab, dt, kt, S0, snr)
    return S, dt, kt
def kurtosis_element(D_comps, frac, ind_i, ind_j, ind_k, ind_l, DT=None,
                     MD=None):
    r""" Computes the diffusion kurtosis tensor element (with indexes i, j, k
    and l) based on the individual diffusion tensor components of a
    multicompartmental model.

    Parameters
    -----------
    D_comps : (K,3,3) ndarray
        Diffusion tensors for all K individual compartment of the
        multicompartmental model.
    frac : sequence of K floats
        Relative weight of each compartment; the values are used directly as
        weights (``multi_tensor_dki`` passes fractions already normalized).
    ind_i : int
        Element's index i (0 for x, 1 for y, 2 for z)
    ind_j : int
        Element's index j (0 for x, 1 for y, 2 for z)
    ind_k : int
        Element's index k (0 for x, 1 for y, 2 for z)
    ind_l : int
        Element's index l (0 for x, 1 for y, 2 for z)
    DT : (3,3) ndarray (optional)
        Voxel's global diffusion tensor; recomputed from ``D_comps`` and
        ``frac`` when not given.
    MD : float (optional)
        Voxel's global mean diffusivity; recomputed from ``DT`` when not
        given.

    Returns
    --------
    wijkl : float
        kurtosis tensor element of index i, j, k, l

    Notes
    --------
    wijkl is calculated using equation 8 given in [1]_

    References
    ----------
    .. [1] R. Neto Henriques et al., "Exploring the 3D geometry of the
           diffusion kurtosis tensor - Impact on the development of robust
           tractography procedures and novel biomarkers", NeuroImage (2015)
           111, 85-99.
    """
    if DT is None:
        # Total diffusion tensor: weighted sum of the compartments.
        DT = np.zeros((3, 3))
        for idx in range(len(frac)):
            DT = DT + frac[idx] * D_comps[idx]
    if MD is None:
        MD = (DT[0][0] + DT[1][1] + DT[2][2]) / 3
    # Weighted sum of the symmetrized products of the compartment tensors.
    wijkl = 0
    for idx in range(len(frac)):
        D = D_comps[idx]
        wijkl = wijkl + frac[idx] * (D[ind_i][ind_j] * D[ind_k][ind_l] +
                                     D[ind_i][ind_k] * D[ind_j][ind_l] +
                                     D[ind_i][ind_l] * D[ind_j][ind_k])
    # Subtract the symmetrized product of the total tensor and normalize by
    # the squared mean diffusivity (eq. 8 of [1]_).
    return (wijkl - DT[ind_i][ind_j] * DT[ind_k][ind_l] -
            DT[ind_i][ind_k] * DT[ind_j][ind_l] -
            DT[ind_i][ind_l] * DT[ind_j][ind_k]) / (MD ** 2)
def DKI_signal(gtab, dt, kt, S0=150, snr=None):
    r""" Simulated signal based on the diffusion and diffusion kurtosis
    tensors of a single voxel. Simulations are performed assuming the DKI
    model.

    Parameters
    -----------
    gtab : GradientTable
        Measurement directions.
    dt : (6,) ndarray
        Elements of the diffusion tensor.
    kt : (15, ) ndarray
        Elements of the diffusion kurtosis tensor.
    S0 : float (optional)
        Strength of signal in the presence of no diffusion gradient.
    snr : float (optional)
        Signal to noise ratio, assuming Rician noise. None implies no noise.

    Returns
    --------
    S : (N,) ndarray
        Simulated signal based on the DKI model:

    .. math::

        S=S_{0}e^{-bD+\frac{1}{6}b^{2}D^{2}K}

    References
    ----------
    .. [1] R. Neto Henriques et al., "Exploring the 3D geometry of the
           diffusion kurtosis tensor - Impact on the development of robust
           tractography procedures and novel biomarkers", NeuroImage (2015)
           111, 85-99.
    """
    dt = np.array(dt)
    kt = np.array(kt)
    # Design matrix mapping the DKI parameter vector to log-signals.
    A = dki_design_matrix(gtab)
    # Mean diffusivity from the diagonal elements (xx, yy, zz) of dt.
    MD = (dt[0] + dt[2] + dt[5]) / 3
    # DKI parameter vector: [dt, kt * MD^2, log(S0)].
    params = np.concatenate((dt, kt * MD * MD, np.array([np.log(S0)])),
                            axis=0)
    signal = np.exp(dot(A, params))
    return add_noise(signal, snr, S0)
def single_tensor_odf(r, evals=None, evecs=None):
    """ Simulated ODF with a single tensor.

    Parameters
    ----------
    r : (N,3) or (M,N,3) ndarray
        Measurement positions in (x, y, z), either as a list or on a grid.
    evals : (3,)
        Eigenvalues of diffusion tensor. By default, use values typical for
        prolate white matter.
    evecs : (3, 3) ndarray
        Eigenvectors of the tensor, written column-wise. You can also think
        of these as the rotation matrix that determines the orientation of
        the diffusion tensor.

    Returns
    -------
    ODF : (N,) ndarray
        The diffusion probability at ``r`` after time ``tau``.

    References
    ----------
    .. [1] Aganj et al., "Reconstruction of the Orientation Distribution
           Function in Single- and Multiple-Shell q-Ball Imaging Within
           Constant Solid Angle", Magnetic Resonance in Medicine, nr. 64,
           pp. 554--566, 2010.
    """
    if evals is None:
        # Typical prolate white-matter eigenvalues (module-level default).
        evals = diffusion_evals
    if evecs is None:
        evecs = np.eye(3)
    out_shape = r.shape[:r.ndim - 1]
    rot = np.asarray(evecs)
    # Diffusion tensor and its inverse in the rotated frame.
    D = dot(dot(rot, np.diag(evals)), rot.T)
    Di = np.linalg.inv(D)
    directions = r.reshape(-1, 3)
    # Quadratic form u' Di u raised to the 3/2 power, per direction.
    P = np.array([dot(dot(u.T, Di), u) ** (3 / 2) for u in directions])
    odf = 1 / (4 * np.pi * np.prod(evals) ** (1 / 2) * P)
    return odf.reshape(out_shape)
def all_tensor_evecs(e0):
    """Given the principle tensor axis, return the array of all
    eigenvectors column-wise (or, the rotation matrix that orientates the
    tensor).

    Parameters
    ----------
    e0 : (3,) ndarray
        Principle tensor axis.

    Returns
    -------
    evecs : (3,3) ndarray
        Tensor eigenvectors, arranged column-wise.
    """
    basis = np.eye(3)
    # Rotation that maps the x-axis onto the principal direction e0.
    rot = vec2vec_rotmat(basis[0], e0)
    # The remaining two eigenvectors are the rotated y- and z-axes;
    # stacking the three as rows and transposing yields column-wise evecs.
    return np.array([e0, rot.dot(basis[1]), rot.dot(basis[2])]).T
def multi_tensor_odf(odf_verts, mevals, angles, fractions):
    r'''Simulate a Multi-Tensor ODF.

    Parameters
    ----------
    odf_verts : (N,3) ndarray
        Vertices of the reconstruction sphere.
    mevals : sequence of 1D arrays,
        Eigen-values for each tensor.
    angles : sequence of 2d tuples,
        Sequence of principal directions for each tensor in polar angles
        or cartesian unit coordinates.
    fractions : sequence of floats,
        Percentages of the fractions for each tensor.

    Returns
    -------
    ODF : (N,) ndarray
        Orientation distribution function.

    Examples
    --------
    Simulate a MultiTensor ODF with two peaks and calculate its exact ODF.

    >>> import numpy as np
    >>> from dipy.sims.voxel import multi_tensor_odf, all_tensor_evecs
    >>> from dipy.data import get_sphere
    >>> sphere = get_sphere('symmetric724')
    >>> vertices, faces = sphere.vertices, sphere.faces
    >>> mevals = np.array(([0.0015, 0.0003, 0.0003],[0.0015, 0.0003, 0.0003]))
    >>> angles = [(0, 0), (90, 0)]
    >>> odf = multi_tensor_odf(vertices, mevals, angles, [50, 50])
    '''
    # Convert percentages to volume fractions in [0, 1].
    weights = [f / 100. for f in fractions]
    sticks = _check_directions(angles)
    # One rotation matrix (eigenvector set) per stick direction.
    mevecs = [all_tensor_evecs(stick) for stick in sticks]
    odf = np.zeros(len(odf_verts))
    for w, evals, evecs in zip(weights, mevals, mevecs):
        odf += w * single_tensor_odf(odf_verts, evals=evals, evecs=evecs)
    return odf
def single_tensor_rtop(evals=None, tau=1.0 / (4 * np.pi ** 2)):
    r'''Simulate the Return To Origin Probability (rtop) of a single tensor.

    Parameters
    ----------
    evals : 1D arrays,
        Eigen-values for the tensor. By default, values typical for prolate
        white matter are used.
    tau : float,
        diffusion time. By default the value that makes q=sqrt(b).

    Returns
    -------
    rtop : float,
        Return to origin probability.

    References
    ----------
    .. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator
           and Its Features in Diffusion MRI", PhD Thesis, 2012.
    '''
    if evals is None:
        # Typical prolate white-matter eigenvalues (module-level default).
        evals = diffusion_evals
    # Value of the Gaussian propagator at zero displacement.
    return 1.0 / np.sqrt((4 * np.pi * tau) ** 3 * np.prod(evals))
def multi_tensor_rtop(mf, mevals=None, tau=1 / (4 * np.pi ** 2)):
    r'''Simulate the Return To Origin Probability of a Multi-Tensor model.

    Parameters
    ----------
    mf : sequence of floats, bounded [0,1]
        Percentages of the fractions for each tensor.
    mevals : sequence of 1D arrays,
        Eigen-values for each tensor. By default, values typical for prolate
        white matter are used.
    tau : float,
        diffusion time. By default the value that makes q=sqrt(b).

    Returns
    -------
    rtop : float,
        Return to origin probability.

    References
    ----------
    .. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator
           and Its Features in Diffusion MRI", PhD Thesis, 2012.
    '''
    if mevals is None:
        # One default (prolate white-matter) tensor per fraction.
        mevals = [None] * len(mf)
    # Fraction-weighted sum of the per-compartment rtop values.
    return sum(f * single_tensor_rtop(evals, tau=tau)
               for f, evals in zip(mf, mevals))
def single_tensor_pdf(r, evals=None, evecs=None, tau=1 / (4 * np.pi ** 2)):
    """Simulated diffusion propagator (PDF) of a single tensor.

    Parameters
    ----------
    r : (N,3) or (M,N,3) ndarray
        Measurement positions in (x, y, z), either as a list or on a grid.
    evals : (3,)
        Eigenvalues of diffusion tensor. By default, use values typical for
        prolate white matter.
    evecs : (3, 3) ndarray
        Eigenvectors of the tensor. You can also think of these as the
        rotation matrix that determines the orientation of the diffusion
        tensor.
    tau : float,
        diffusion time. By default the value that makes q=sqrt(b).

    Returns
    -------
    pdf : (N,) ndarray
        The diffusion probability at ``r`` after time ``tau``.

    References
    ----------
    .. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator
           and Its Features in Diffusion MRI", PhD Thesis, 2012.
    """
    if evals is None:
        # Typical prolate white-matter eigenvalues (module-level default).
        evals = diffusion_evals
    if evecs is None:
        evecs = np.eye(3)
    out_shape = r.shape[:r.ndim - 1]
    rot = np.asarray(evecs)
    # Diffusion tensor and its inverse in the rotated frame.
    D = dot(dot(rot, np.diag(evals)), rot.T)
    Di = np.linalg.inv(D)
    positions = r.reshape(-1, 3)
    # Gaussian exponent -u' Di u / (4 tau), per displacement.
    exponents = np.array([(-dot(dot(u.T, Di), u)) / (4 * tau)
                          for u in positions])
    norm = 1 / np.sqrt((4 * np.pi * tau) ** 3 * np.prod(evals))
    return (norm * np.exp(exponents)).reshape(out_shape)
def multi_tensor_pdf(pdf_points, mevals, angles, fractions,
                     tau=1 / (4 * np.pi ** 2)):
    r'''Simulate a Multi-Tensor diffusion propagator (PDF).

    Parameters
    ----------
    pdf_points : (N, 3) ndarray
        Points to evaluate the PDF.
    mevals : sequence of 1D arrays,
        Eigen-values for each tensor. By default, values typical for prolate
        white matter are used.
    angles : sequence,
        Sequence of principal directions for each tensor in polar angles
        or cartesian unit coordinates.
    fractions : sequence of floats,
        Percentages of the fractions for each tensor.
    tau : float,
        diffusion time. By default the value that makes q=sqrt(b).

    Returns
    -------
    pdf : (N,) ndarray,
        Probability density function of the water displacement.

    References
    ----------
    .. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator
           and its Features in Diffusion MRI", PhD Thesis, 2012.
    '''
    # Convert percentages to volume fractions in [0, 1].
    weights = [f / 100. for f in fractions]
    sticks = _check_directions(angles)
    # One rotation matrix (eigenvector set) per stick direction.
    mevecs = [all_tensor_evecs(s) for s in sticks]
    pdf = np.zeros(len(pdf_points))
    for w, evals, evecs in zip(weights, mevals, mevecs):
        pdf += w * single_tensor_pdf(pdf_points, evals=evals, evecs=evecs,
                                     tau=tau)
    return pdf
def single_tensor_msd(evals=None, tau=1 / (4 * np.pi ** 2)):
    r'''Simulate the Mean Square Displacement (MSD) of a single tensor.

    Parameters
    ----------
    evals : 1D arrays,
        Eigen-values for the tensor. By default, values typical for prolate
        white matter are used.
    tau : float,
        diffusion time. By default the value that makes q=sqrt(b).

    Returns
    -------
    msd : float,
        Mean square displacement.

    References
    ----------
    .. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator
           and Its Features in Diffusion MRI", PhD Thesis, 2012.
    '''
    if evals is None:
        # Typical prolate white-matter eigenvalues (module-level default).
        evals = diffusion_evals
    # MSD of a Gaussian propagator: 2 * tau * trace(D).
    return 2 * tau * np.sum(evals)
def multi_tensor_msd(mf, mevals=None, tau=1 / (4 * np.pi ** 2)):
    r'''Simulate the Mean Square Displacement of a Multi-Tensor model.

    Parameters
    ----------
    mf : sequence of floats, bounded [0,1]
        Percentages of the fractions for each tensor.
    mevals : sequence of 1D arrays,
        Eigen-values for each tensor. By default, values typical for prolate
        white matter are used.
    tau : float,
        diffusion time. By default the value that makes q=sqrt(b).

    Returns
    -------
    msd : float,
        Mean square displacement.

    References
    ----------
    .. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator
           and Its Features in Diffusion MRI", PhD Thesis, 2012.
    '''
    if mevals is None:
        # One default (prolate white-matter) tensor per fraction.
        mevals = [None] * len(mf)
    # Fraction-weighted sum of the per-compartment MSD values.
    return sum(f * single_tensor_msd(evals, tau=tau)
               for f, evals in zip(mf, mevals))
# Use standard naming convention, but keep old names
# for backward compatibility.
# NOTE: these CamelCase aliases exist only so that legacy callers keep
# working; new code should call the snake_case functions directly.
SticksAndBall = sticks_and_ball
SingleTensor = single_tensor
MultiTensor = multi_tensor
| bsd-3-clause |
lzppp/mylearning | ryu/contrib/ovs/db/data.py | 50 | 19222 | # Copyright (c) 2009, 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import uuid
import ovs.poller
import ovs.socket_util
import ovs.json
import ovs.jsonrpc
import ovs.ovsuuid
import ovs.db.parser
from ovs.db import error
import ovs.db.types
class ConstraintViolation(error.Error):
    """Raised when an atom or datum violates a constraint declared for its
    column type (e.g. enum membership, numeric range, or string length)."""
    def __init__(self, msg, json=None):
        # Tag the error so callers can distinguish constraint failures
        # from other ovs.db.error.Error instances.
        error.Error.__init__(self, msg, json, tag="constraint violation")
def escapeCString(src):
    """Return 'src' escaped for inclusion in a C string literal.

    Backslash and double-quote are backslash-escaped, common control
    characters get their named escapes, and any other character below
    0x20 is emitted as a three-digit octal escape.
    """
    # Named escapes for the common control characters.
    named = {'\n': '\\n', '\r': '\\r', '\a': '\\a', '\b': '\\b',
             '\f': '\\f', '\t': '\\t', '\v': '\\v'}
    out = []
    for ch in src:
        if ch == '\\' or ch == '"':
            out.append('\\' + ch)
        elif ord(ch) < 32:
            # Fall back to octal for control characters with no named form.
            out.append(named.get(ch, '\\%03o' % ord(ch)))
        else:
            out.append(ch)
    return ''.join(out)
def returnUnchanged(x):
    """Identity function; used as the default (no-op) escaping hook for
    Atom.toEnglish()."""
    return x
class Atom(object):
    """A single scalar OVSDB value (integer, real, boolean, string, or
    UUID) paired with its ovs.db.types atomic type.

    NOTE: this module targets Python 2 (``long``/``unicode`` names and the
    ``__cmp__`` protocol are used).
    """

    def __init__(self, type_, value=None):
        self.type = type_
        if value is not None:
            self.value = value
        else:
            # No explicit value: use the type's default (see default()).
            self.value = type_.default_atom()

    def __cmp__(self, other):
        # Atoms only compare against atoms of the same type.
        if not isinstance(other, Atom) or self.type != other.type:
            return NotImplemented
        elif self.value < other.value:
            return -1
        elif self.value > other.value:
            return 1
        else:
            return 0

    def __hash__(self):
        return hash(self.value)

    @staticmethod
    def default(type_):
        """Returns the default value for the given type_, which must be an
        instance of ovs.db.types.AtomicType.

        The default value for each atomic type is:

          - 0, for integer or real atoms.
          - False, for a boolean atom.
          - "", for a string atom.
          - The all-zeros UUID, for a UUID atom."""
        return Atom(type_)

    def is_default(self):
        return self == self.default(self.type)

    @staticmethod
    def from_json(base, json, symtab=None):
        """Parses 'json' as an atom of the type described by 'base' (an
        ovs.db.types.BaseType), checks its constraints, and returns it.
        Raises ovs.db.error.Error on a type mismatch."""
        type_ = base.type
        json = ovs.db.parser.float_to_int(json)
        if ((type_ == ovs.db.types.IntegerType and type(json) in [int, long])
            or (type_ == ovs.db.types.RealType
                and type(json) in [int, long, float])
            or (type_ == ovs.db.types.BooleanType and type(json) == bool)
            or (type_ == ovs.db.types.StringType
                and type(json) in [str, unicode])):
            atom = Atom(type_, json)
        elif type_ == ovs.db.types.UuidType:
            atom = Atom(type_, ovs.ovsuuid.from_json(json, symtab))
        else:
            raise error.Error("expected %s" % type_.to_string(), json)
        atom.check_constraints(base)
        return atom

    @staticmethod
    def from_python(base, value):
        """Builds an atom of type 'base' from a native Python 'value' and
        checks its constraints; raises ovs.db.error.Error on mismatch."""
        value = ovs.db.parser.float_to_int(value)
        if type(value) in base.type.python_types:
            atom = Atom(base.type, value)
        else:
            raise error.Error("expected %s, got %s" % (base.type, type(value)))
        atom.check_constraints(base)
        return atom

    def check_constraints(self, base):
        """Checks whether 'atom' meets the constraints (if any) defined in
        'base' and raises an ovs.db.error.Error if any constraint is violated.

        'base' and 'atom' must have the same type.

        Checking UUID constraints is deferred to transaction commit time, so
        this function does nothing for UUID constraints."""
        assert base.type == self.type
        if base.enum is not None and self not in base.enum:
            raise ConstraintViolation(
                "%s is not one of the allowed values (%s)"
                % (self.to_string(), base.enum.to_string()))
        elif base.type in [ovs.db.types.IntegerType, ovs.db.types.RealType]:
            if ((base.min is None or self.value >= base.min) and
                (base.max is None or self.value <= base.max)):
                pass
            elif base.min is not None and base.max is not None:
                raise ConstraintViolation(
                    "%s is not in the valid range %.15g to %.15g (inclusive)"
                    % (self.to_string(), base.min, base.max))
            elif base.min is not None:
                raise ConstraintViolation(
                    "%s is less than minimum allowed value %.15g"
                    % (self.to_string(), base.min))
            else:
                raise ConstraintViolation(
                    "%s is greater than maximum allowed value %.15g"
                    % (self.to_string(), base.max))
        elif base.type == ovs.db.types.StringType:
            # XXX The C version validates that the string is valid UTF-8 here.
            # Do we need to do that in Python too?
            s = self.value
            length = len(s)
            if length < base.min_length:
                raise ConstraintViolation(
                    '"%s" length %d is less than minimum allowed length %d'
                    % (s, length, base.min_length))
            elif length > base.max_length:
                raise ConstraintViolation(
                    '"%s" length %d is greater than maximum allowed '
                    'length %d' % (s, length, base.max_length))

    def to_json(self):
        # UUIDs have a special OVSDB JSON representation; other atom types
        # map directly to native JSON values.
        if self.type == ovs.db.types.UuidType:
            return ovs.ovsuuid.to_json(self.value)
        else:
            return self.value

    def cInitAtom(self, var):
        """Returns a list of C statements that initialize the C atom named
        'var' to this atom's value (used by code generators)."""
        if self.type == ovs.db.types.IntegerType:
            return ['%s.integer = %d;' % (var, self.value)]
        elif self.type == ovs.db.types.RealType:
            return ['%s.real = %.15g;' % (var, self.value)]
        elif self.type == ovs.db.types.BooleanType:
            # BUG FIX: the format string must be filled in with 'var';
            # previously the literal '%s.boolean = ...;' was returned,
            # emitting invalid C.
            if self.value:
                return ['%s.boolean = true;' % var]
            else:
                return ['%s.boolean = false;' % var]
        elif self.type == ovs.db.types.StringType:
            return ['%s.string = xstrdup("%s");'
                    % (var, escapeCString(self.value))]
        elif self.type == ovs.db.types.UuidType:
            return ovs.ovsuuid.to_c_assignment(self.value, var)

    def toEnglish(self, escapeLiteral=returnUnchanged):
        """Renders the atom for documentation output; string values are
        passed through 'escapeLiteral'."""
        if self.type == ovs.db.types.IntegerType:
            return '%d' % self.value
        elif self.type == ovs.db.types.RealType:
            return '%.15g' % self.value
        elif self.type == ovs.db.types.BooleanType:
            if self.value:
                return 'true'
            else:
                return 'false'
        elif self.type == ovs.db.types.StringType:
            return escapeLiteral(self.value)
        elif self.type == ovs.db.types.UuidType:
            return self.value.value

    # Matches strings that could not be printed bare without ambiguity:
    # the empty string, the boolean keywords, or anything that is not a
    # simple identifier-like token.
    __need_quotes_re = re.compile("$|true|false|[^_a-zA-Z]|.*[^-._a-zA-Z]")

    @staticmethod
    def __string_needs_quotes(s):
        return Atom.__need_quotes_re.match(s)

    def to_string(self):
        if self.type == ovs.db.types.IntegerType:
            return '%d' % self.value
        elif self.type == ovs.db.types.RealType:
            return '%.15g' % self.value
        elif self.type == ovs.db.types.BooleanType:
            if self.value:
                return 'true'
            else:
                return 'false'
        elif self.type == ovs.db.types.StringType:
            if Atom.__string_needs_quotes(self.value):
                return ovs.json.to_string(self.value)
            else:
                return self.value
        elif self.type == ovs.db.types.UuidType:
            return str(self.value)

    @staticmethod
    def new(x):
        """Builds an Atom from a native Python value, inferring its atomic
        type; raises TypeError for unsupported value types."""
        if type(x) in [int, long]:
            t = ovs.db.types.IntegerType
        elif type(x) == float:
            t = ovs.db.types.RealType
        elif x in [False, True]:
            t = ovs.db.types.BooleanType
        elif type(x) in [str, unicode]:
            t = ovs.db.types.StringType
        elif isinstance(x, uuid.UUID):
            # BUG FIX: 'uuid' alone names the module, and isinstance()
            # against a module raises TypeError, so UUID atoms could never
            # be created here; the check must be against uuid.UUID.
            t = ovs.db.types.UuidType
        else:
            raise TypeError
        return Atom(t, x)
class Datum(object):
    """An OVSDB datum: a collection of Atoms together with its
    ovs.db.types.Type.  'values' maps each key Atom to a value Atom for
    maps, or to None for sets and scalars.

    NOTE: this module targets Python 2 (``iteritems``/``__cmp__`` and
    list-returning ``keys()``/``items()`` are used throughout).
    """

    def __init__(self, type_, values=None):
        # BUG FIX: the previous signature used a mutable default argument
        # (values={}); that single dict was shared by every Datum created
        # without explicit values, so mutating one datum leaked into all
        # of them.  A fresh dict is now created per instance.
        self.type = type_
        self.values = {} if values is None else values

    def __cmp__(self, other):
        if not isinstance(other, Datum):
            return NotImplemented
        elif self.values < other.values:
            return -1
        elif self.values > other.values:
            return 1
        else:
            return 0

    # Datums are mutable, so they are deliberately unhashable.
    __hash__ = None

    def __contains__(self, item):
        return item in self.values

    def copy(self):
        """Returns a shallow copy: the values dict is duplicated but the
        Atom objects themselves are shared."""
        return Datum(self.type, dict(self.values))

    @staticmethod
    def default(type_):
        """Returns the default datum for 'type_': empty when permitted,
        otherwise a single default key (and default value, for maps)."""
        if type_.n_min == 0:
            values = {}
        elif type_.is_map():
            values = {type_.key.default(): type_.value.default()}
        else:
            values = {type_.key.default(): None}
        return Datum(type_, values)

    def is_default(self):
        return self == Datum.default(self.type)

    def check_constraints(self):
        """Checks that each of the atoms in 'datum' conforms to the constraints
        specified by its 'type' and raises an ovs.db.error.Error.

        This function is not commonly useful because the most ordinary way to
        obtain a datum is ultimately via Datum.from_json() or Atom.from_json(),
        which check constraints themselves."""
        for keyAtom, valueAtom in self.values.iteritems():
            keyAtom.check_constraints(self.type.key)
            if valueAtom is not None:
                valueAtom.check_constraints(self.type.value)

    @staticmethod
    def from_json(type_, json, symtab=None):
        """Parses 'json' as a datum of the type described by 'type'.  If
        successful, returns a new datum.  On failure, raises an
        ovs.db.error.Error.

        Violations of constraints expressed by 'type' are treated as errors.

        If 'symtab' is nonnull, then named UUIDs in 'symtab' are accepted.
        Refer to ovsdb/SPECS for information about this, and for the syntax
        that this function accepts."""
        is_map = type_.is_map()
        if (is_map or
            (type(json) == list and len(json) > 0 and json[0] == "set")):
            if is_map:
                class_ = "map"
            else:
                class_ = "set"
            # Sets and maps are wrapped as ["set", [...]] / ["map", [...]].
            inner = ovs.db.parser.unwrap_json(json, class_, [list, tuple],
                                              "array")
            n = len(inner)
            if n < type_.n_min or n > type_.n_max:
                raise error.Error("%s must have %d to %d members but %d are "
                                  "present" % (class_, type_.n_min,
                                               type_.n_max, n),
                                  json)
            values = {}
            for element in inner:
                if is_map:
                    key, value = ovs.db.parser.parse_json_pair(element)
                    keyAtom = Atom.from_json(type_.key, key, symtab)
                    valueAtom = Atom.from_json(type_.value, value, symtab)
                else:
                    keyAtom = Atom.from_json(type_.key, element, symtab)
                    valueAtom = None
                if keyAtom in values:
                    if is_map:
                        raise error.Error("map contains duplicate key")
                    else:
                        raise error.Error("set contains duplicate")
                values[keyAtom] = valueAtom
            return Datum(type_, values)
        else:
            # A bare value is shorthand for a single-element set.
            keyAtom = Atom.from_json(type_.key, json, symtab)
            return Datum(type_, {keyAtom: None})

    def to_json(self):
        """Returns the OVSDB JSON representation: a bare value for a
        single-element set, or a ["set", ...] / ["map", ...] wrapper."""
        if self.type.is_map():
            return ["map", [[k.to_json(), v.to_json()]
                            for k, v in sorted(self.values.items())]]
        elif len(self.values) == 1:
            # Python 2: dict.keys() returns a list, so indexing is valid.
            key = self.values.keys()[0]
            return key.to_json()
        else:
            return ["set", [k.to_json() for k in sorted(self.values.keys())]]

    def to_string(self):
        """Returns a human-readable string: brackets/braces are added only
        when the datum can hold more than one element or is empty."""
        head = tail = None
        if self.type.n_max > 1 or len(self.values) == 0:
            if self.type.is_map():
                head = "{"
                tail = "}"
            else:
                head = "["
                tail = "]"
        s = []
        if head:
            s.append(head)
        for i, key in enumerate(sorted(self.values)):
            if i:
                s.append(", ")
            s.append(key.to_string())
            if self.type.is_map():
                s.append("=")
                s.append(self.values[key].to_string())
        if tail:
            s.append(tail)
        return ''.join(s)

    def as_list(self):
        """Returns the values as a list ([key, value] pairs for a map)."""
        if self.type.is_map():
            return [[k.value, v.value] for k, v in self.values.iteritems()]
        else:
            return [k.value for k in self.values.iterkeys()]

    def as_dict(self):
        return dict(self.values)

    def as_scalar(self):
        """Returns the single element's native value (a [key, value] pair
        for a map), or None if the datum has more than one element."""
        if len(self.values) == 1:
            if self.type.is_map():
                # BUG FIX: this previously indexed self.values.iteritems()[0];
                # iteritems() returns an iterator, which is not subscriptable,
                # so the map branch always raised TypeError.  items() (a list
                # in Python 2) is required.
                k, v = self.values.items()[0]
                return [k.value, v.value]
            else:
                return self.values.keys()[0].value
        else:
            return None

    def to_python(self, uuid_to_row):
        """Returns this datum's value converted into a natural Python
        representation of this datum's type, according to the following
        rules:

        - If the type has exactly one value and it is not a map (that is,
          self.type.is_scalar() returns True), then the value is:

            * An int or long, for an integer column.

            * An int or long or float, for a real column.

            * A bool, for a boolean column.

            * A str or unicode object, for a string column.

            * A uuid.UUID object, for a UUID column without a ref_table.

            * An object represented the referenced row, for a UUID column with
              a ref_table.  (For the Idl, this object will be an ovs.db.idl.Row
              object.)

          If some error occurs (e.g. the database server's idea of the column
          is different from the IDL's idea), then the default value for the
          scalar type is used (see Atom.default()).

        - Otherwise, if the type is not a map, then the value is a Python list
          whose elements have the types described above.

        - Otherwise, the type is a map, and the value is a Python dict that
          maps from key to value, with key and value types determined as
          described above.

        'uuid_to_row' must be a function that takes a value and an
        ovs.db.types.BaseType and translates UUIDs into row objects."""
        if self.type.is_scalar():
            value = uuid_to_row(self.as_scalar(), self.type.key)
            if value is None:
                return self.type.key.default()
            else:
                return value
        elif self.type.is_map():
            value = {}
            for k, v in self.values.iteritems():
                dk = uuid_to_row(k.value, self.type.key)
                dv = uuid_to_row(v.value, self.type.value)
                if dk is not None and dv is not None:
                    value[dk] = dv
            return value
        else:
            s = set()
            for k in self.values:
                dk = uuid_to_row(k.value, self.type.key)
                if dk is not None:
                    s.add(dk)
            return sorted(s)

    @staticmethod
    def from_python(type_, value, row_to_uuid):
        """Returns a new Datum with the given ovs.db.types.Type 'type_'.  The
        new datum's value is taken from 'value', which must take the form
        described as a valid return value from Datum.to_python() for 'type'.

        Each scalar value within 'value' is initally passed through
        'row_to_uuid', which should convert objects that represent rows (if
        any) into uuid.UUID objects and return other data unchanged.

        Raises ovs.db.error.Error if 'value' is not in an appropriate form for
        'type_'."""
        d = {}
        if type(value) == dict:
            for k, v in value.iteritems():
                ka = Atom.from_python(type_.key, row_to_uuid(k))
                va = Atom.from_python(type_.value, row_to_uuid(v))
                d[ka] = va
        elif type(value) in (list, tuple):
            for k in value:
                ka = Atom.from_python(type_.key, row_to_uuid(k))
                d[ka] = None
        else:
            ka = Atom.from_python(type_.key, row_to_uuid(value))
            d[ka] = None
        datum = Datum(type_, d)
        datum.check_constraints()
        if not datum.conforms_to_type():
            raise error.Error("%d values when type requires between %d and %d"
                              % (len(d), type_.n_min, type_.n_max))
        return datum

    def __getitem__(self, key):
        if not isinstance(key, Atom):
            key = Atom.new(key)
        if not self.type.is_map():
            raise IndexError
        elif key not in self.values:
            raise KeyError
        else:
            return self.values[key].value

    def get(self, key, default=None):
        if not isinstance(key, Atom):
            key = Atom.new(key)
        if key in self.values:
            return self.values[key].value
        else:
            return default

    def __str__(self):
        return self.to_string()

    def conforms_to_type(self):
        # True when the element count fits the type's [n_min, n_max] bounds.
        n = len(self.values)
        return self.type.n_min <= n <= self.type.n_max

    def cInitDatum(self, var):
        """Returns a list of C statements that initialize the C datum named
        'var' to this datum's contents (used by code generators)."""
        if len(self.values) == 0:
            return ["ovsdb_datum_init_empty(%s);" % var]
        s = ["%s->n = %d;" % (var, len(self.values))]
        s += ["%s->keys = xmalloc(%d * sizeof *%s->keys);"
              % (var, len(self.values), var)]
        for i, key in enumerate(sorted(self.values)):
            s += key.cInitAtom("%s->keys[%d]" % (var, i))
        if self.type.value:
            s += ["%s->values = xmalloc(%d * sizeof *%s->values);"
                  % (var, len(self.values), var)]
            for i, (key, value) in enumerate(sorted(self.values.items())):
                s += value.cInitAtom("%s->values[%d]" % (var, i))
        else:
            s += ["%s->values = NULL;" % var]
        if len(self.values) > 1:
            s += ["ovsdb_datum_sort_assert(%s, OVSDB_TYPE_%s);"
                  % (var, self.type.key.type.to_string().upper())]
        return s
| apache-2.0 |
da1z/intellij-community | python/lib/Lib/site-packages/django/conf/locale/en/formats.py | 318 | 1637 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Output formats for the ``en`` locale; these use Django's date-format
# syntax (see the note at the top of this file).
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Formats are tried in order when parsing user input.
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
    # '%b %d %Y', '%b %d, %Y',            # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y',            # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y',            # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y',            # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S',     # '14:30:59'
    '%H:%M',        # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%m/%d/%Y %H:%M:%S',     # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M',        # '10/25/2006 14:30'
    '%m/%d/%Y',              # '10/25/2006'
    '%m/%d/%y %H:%M:%S',     # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M',        # '10/25/06 14:30'
    '%m/%d/%y',              # '10/25/06'
)
# Number formatting for en: e.g. 1,234,567.89
DECIMAL_SEPARATOR = u'.'
THOUSAND_SEPARATOR = u','
NUMBER_GROUPING = 3  # digits per thousands group
| apache-2.0 |
Chuban/moose | python/TestHarness/testers/Tester.py | 2 | 27923 | import platform, re, os
from TestHarness import util
from FactorySystem.MooseObject import MooseObject
from tempfile import TemporaryFile
import subprocess
from signal import SIGTERM
class Tester(MooseObject):
    """
    Base class from which all tester objects are instanced.
    """

    @staticmethod
    def validParams():
        """ return the valid parameters (InputParameters) recognized by every Tester """
        params = MooseObject.validParams()

        # Common Options
        params.addRequiredParam('type', "The type of test of Tester to create for this test.")
        params.addParam('max_time', 300, "The maximum in seconds that the test will be allowed to run.")
        params.addParam('min_reported_time', 10, "The minimum time elapsed before a test is reported as taking to long to run.")
        params.addParam('skip', "Provide a reason this test will be skipped.")
        params.addParam('deleted', "Tests that only show up when using the '-e' option (Permanently skipped or not implemented).")
        params.addParam('heavy', False, "Set to True if this test should only be run when the '--heavy' option is used.")
        params.addParam('group', [], "A list of groups for which this test belongs.")
        params.addParam('prereq', [], "A list of prereq tests that need to run successfully before launching this test.")
        params.addParam('skip_checks', False, "Tells the TestHarness to skip additional checks (This parameter is set automatically by the TestHarness during recovery tests)")
        params.addParam('scale_refine', 0, "The number of refinements to do when scaling")
        params.addParam('success_message', 'OK', "The successful message")
        params.addParam('cli_args', [], "Additional arguments to be passed to the test.")
        params.addParam('allow_test_objects', False, "Allow the use of test objects by adding --allow-test-objects to the command line.")
        params.addParam('valgrind', 'NONE', "Set to (NONE, NORMAL, HEAVY) to determine which configurations where valgrind will run.")

        # Test Filters
        params.addParam('platform', ['ALL'], "A list of platforms for which this test will run on. ('ALL', 'DARWIN', 'LINUX', 'SL', 'LION', 'ML')")
        params.addParam('compiler', ['ALL'], "A list of compilers for which this test is valid on. ('ALL', 'GCC', 'INTEL', 'CLANG')")
        params.addParam('petsc_version', ['ALL'], "A list of petsc versions for which this test will run on, supports normal comparison operators ('<', '>', etc...)")
        params.addParam('petsc_version_release', ['ALL'], "A test that runs against PETSc master if FALSE ('ALL', 'TRUE', 'FALSE')")
        params.addParam('slepc_version', [], "A list of slepc versions for which this test will run on, supports normal comparison operators ('<', '>', etc...)")
        params.addParam('mesh_mode', ['ALL'], "A list of mesh modes for which this test will run ('DISTRIBUTED', 'REPLICATED')")
        params.addParam('method', ['ALL'], "A test that runs under certain executable configurations ('ALL', 'OPT', 'DBG', 'DEVEL', 'OPROF', 'PRO')")
        params.addParam('library_mode', ['ALL'], "A test that only runs when libraries are built under certain configurations ('ALL', 'STATIC', 'DYNAMIC')")
        params.addParam('dtk', ['ALL'], "A test that runs only if DTK is detected ('ALL', 'TRUE', 'FALSE')")
        params.addParam('unique_ids', ['ALL'], "A test that runs only if UNIQUE_IDs are enabled ('ALL', 'TRUE', 'FALSE')")
        params.addParam('recover', True, "A test that runs with '--recover' mode enabled")
        params.addParam('vtk', ['ALL'], "A test that runs only if VTK is detected ('ALL', 'TRUE', 'FALSE')")
        params.addParam('tecplot', ['ALL'], "A test that runs only if Tecplot is detected ('ALL', 'TRUE', 'FALSE')")
        params.addParam('dof_id_bytes', ['ALL'], "A test that runs only if libmesh is configured --with-dof-id-bytes = a specific number, e.g. '4', '8'")
        params.addParam('petsc_debug', ['ALL'], "{False,True} -> test only runs when PETSc is configured with --with-debugging={0,1}, otherwise test always runs.")
        params.addParam('curl', ['ALL'], "A test that runs only if CURL is detected ('ALL', 'TRUE', 'FALSE')")
        params.addParam('tbb', ['ALL'], "A test that runs only if TBB is available ('ALL', 'TRUE', 'FALSE')")
        params.addParam('superlu', ['ALL'], "A test that runs only if SuperLU is available via PETSc ('ALL', 'TRUE', 'FALSE')")
        params.addParam('slepc', ['ALL'], "A test that runs only if SLEPc is available ('ALL', 'TRUE', 'FALSE')")
        params.addParam('unique_id', ['ALL'], "A test that runs only if libmesh is configured with --enable-unique-id ('ALL', 'TRUE', 'FALSE')")
        params.addParam('cxx11', ['ALL'], "A test that runs only if CXX11 is available ('ALL', 'TRUE', 'FALSE')")
        params.addParam('asio', ['ALL'], "A test that runs only if ASIO is available ('ALL', 'TRUE', 'FALSE')")
        params.addParam('depend_files', [], "A test that only runs if all depend files exist (files listed are expected to be relative to the base directory, not the test directory")
        params.addParam('env_vars', [], "A test that only runs if all the environment variables listed exist")
        params.addParam('should_execute', True, 'Whether or not the executable needs to be run. Use this to chain together multiple tests based off of one executeable invocation')
        params.addParam('required_submodule', [], "A list of initialized submodules for which this test requires.")
        params.addParam('required_objects', [], "A list of required objects that are in the executable.")
        params.addParam('check_input', False, "Check for correct input file syntax")
        params.addParam('display_required', False, "The test requires and active display for rendering (i.e., ImageDiff tests).")
        params.addParam('boost', ['ALL'], "A test that runs only if BOOT is detected ('ALL', 'TRUE', 'FALSE')")

        # Queueing specific
        params.addParam('copy_files', [], "Additional list of files/directories to copy when performing queueing operations")
        params.addParam('link_files', [], "Additional list of files/directories to symlink when performing queueing operations")
        params.addParam('queue_scheduler', True, "A test that runs only if using queue options")

        return params

    # This is what will be checked for when we look for valid testers
    IS_TESTER = True

    def __init__(self, name, params):
        MooseObject.__init__(self, name, params)
        self.specs = params
        self.outfile = None
        self.std_out = ''
        self.exit_code = 0
        self.process = None

        # Bool if test can run (cached by getRunnable)
        self._runnable = None

        # Initialize the status bucket class
        self.status = util.TestStatus()

        # Enumerate the buckets here so they are easier to work with in the tester class
        self.bucket_initialized = self.status.bucket_initialized
        self.bucket_success = self.status.bucket_success
        self.bucket_fail = self.status.bucket_fail
        self.bucket_diff = self.status.bucket_diff
        self.bucket_pending = self.status.bucket_pending
        self.bucket_finished = self.status.bucket_finished
        self.bucket_deleted = self.status.bucket_deleted
        self.bucket_skip = self.status.bucket_skip
        self.bucket_silent = self.status.bucket_silent
        self.bucket_queued = self.status.bucket_queued
        self.bucket_waiting_processing = self.status.bucket_waiting_processing

        # Set the status message
        if self.specs['check_input']:
            self.success_message = 'SYNTAX PASS'
        else:
            self.success_message = self.specs['success_message']

        # Set up common paramaters
        self.should_execute = self.specs['should_execute']
        self.check_input = self.specs['check_input']

        if self.specs["allow_test_objects"]:
            self.specs["cli_args"].append("--allow-test-objects")

    def getTestName(self):
        """ return test name """
        return self.specs['test_name']

    def getPrereqs(self):
        """ return list of prerequisite tests this test depends on """
        return self.specs['prereq']

    def getMooseDir(self):
        """ return moose directory """
        return self.specs['moose_dir']

    def getTestDir(self):
        """ return directory this tester is located """
        return self.specs['test_dir']

    def getMinReportTime(self):
        """ return minimum time elapse before reporting a 'long running' status """
        return self.specs['min_reported_time']

    def getMaxTime(self):
        """ return maximum time elapse before reporting a 'timeout' status """
        return self.specs['max_time']

    def getRunnable(self, options):
        """ return bool and cache results, if this test can run """
        if self._runnable is None:
            self._runnable = self.checkRunnableBase(options)
        return self._runnable

    def getColor(self):
        """ return print color assigned to this tester """
        return self.status.getColor()

    def getInputFile(self):
        """ return the input file if applicable to this Tester """
        return None

    def getOutputFiles(self):
        """ return the output files if applicable to this Tester """
        return []

    def getSuccessMessage(self):
        """ return the success message assigned to this tester """
        return self.success_message

    def getStatusMessage(self):
        """ return the status message assigned to this tester """
        return self.status.getStatusMessage()

    def getStatus(self):
        """ return current enumerated tester status bucket """
        return self.status.getStatus()

    def setStatus(self, reason, bucket):
        """
        Method to set a testers status.
        Syntax:
          .setStatus('str message', <enumerated tester status bucket>)
        """
        self.status.setStatus(reason, bucket)
        return self.getStatus()

    # Method to check if a test has failed. This method will return true if a
    # tester has failed at any point during the processing of the test.
    # Note: It's possible for a tester to report false for both didFail and
    #       didPass. This will happen if the tester is in-progress for instance.
    #       See didPass()
    def didFail(self):
        """
        return bool for tester failure
        see util.TestStatus for more information
        """
        return self.status.didFail()

    # Method to check for successfull test
    # Note: This method can return False until the tester has completely finished.
    #       For this reason it should be used only after the tester has completed.
    #       Instead you may want to use the didFail method which returns false
    #       only if the tester has failed at any point during the processing
    #       of that tester (e.g. after the main command has been run but before
    #       output has been tested).
    #       See didFail()
    def didPass(self):
        """
        return boolean for tester successfulness
        see util.TestStatus for more information
        """
        return self.status.didPass()

    def didDiff(self):
        """
        return boolean for a differential tester failure
        see util.TestStatus for more information
        """
        return self.status.didDiff()

    def isInitialized(self):
        """
        return boolean for tester in an initialization status
        see util.TestStatus for more information
        """
        return self.status.isInitialized()

    def isPending(self):
        """
        return boolean for tester in a pending status
        see util.TestStatus for more information
        """
        return self.status.isPending()

    def isFinished(self):
        """
        return boolean for tester no longer pending
        see util.TestStatus for more information
        """
        return self.status.isFinished()

    def isSkipped(self):
        """
        return boolean for tester being reported as skipped
        see util.TestStatus for more information
        """
        return self.status.isSkipped()

    def isSilent(self):
        """
        return boolean for tester being skipped and not reported
        see util.TestStatus for more information
        """
        return self.status.isSilent()

    def isDeleted(self):
        """
        return boolean for tester being skipped and not reported due to
        internal deletion status
        see util.TestStatus for more information
        """
        return self.status.isDeleted()

    def isQueued(self):
        """
        return boolean for tester in a queued status
        see util.TestStatus for more information
        """
        return self.status.isQueued()

    def isWaiting(self):
        """
        return boolean for tester awaiting process results
        """
        return self.status.isWaiting()

    def getCheckInput(self):
        """ return bool for whether this tester only checks input syntax """
        return self.check_input

    def setValgrindMode(self, mode):
        """ Increase the alloted time for tests when running with the valgrind option """
        if mode == 'NORMAL':
            self.specs['max_time'] = self.specs['max_time'] * 2
        elif mode == 'HEAVY':
            self.specs['max_time'] = self.specs['max_time'] * 6

    def checkRunnable(self, options):
        """
        Derived method to return tuple if this tester should be executed or not.

        The tuple should be structured as (boolean, 'reason'). If false, and the
        reason is left blank, this tester will be treated as silent (no status
        will be printed and will not be counted among the skipped tests).
        """
        return (True, '')

    def shouldExecute(self):
        """
        return boolean for tester allowed to execute its command
        see .getCommand for more information
        """
        return self.should_execute

    def prepare(self, options):
        """
        Method which is called prior to running the test. It can be used to cleanup files
        or do other preparations before the tester is run.
        """
        return

    def getThreads(self, options):
        """ return number of threads to use for this tester """
        return 1

    def getProcs(self, options):
        """ return number of processors to use for this tester """
        return 1

    def getCommand(self, options):
        """ return the executable command that will be executed by the tester """
        return

    def runCommand(self, timer, options):
        """
        Helper method for running external (sub)processes as part of the tester's execution.  This
        uses the tester's getCommand and getTestDir methods to run a subprocess.  The timer must
        be the same timer passed to the run method.  Results from running the subprocess is stored
        in the tester's output and exit_code fields.
        """
        cmd = self.getCommand(options)
        cwd = self.getTestDir()

        self.process = None
        try:
            f = TemporaryFile()
            # On Windows, there is an issue with path translation when the command is passed in
            # as a list.
            if platform.system() == "Windows":
                process = subprocess.Popen(cmd, stdout=f, stderr=f, close_fds=False, shell=True, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP, cwd=cwd)
            else:
                # Run in a new session/process group so killCommand can signal the whole tree.
                process = subprocess.Popen(cmd, stdout=f, stderr=f, close_fds=False, shell=True, preexec_fn=os.setsid, cwd=cwd)
        except:
            print("Error in launching a new task", cmd)
            raise

        self.process = process
        self.outfile = f

        timer.start()
        process.wait()
        timer.stop()

        self.exit_code = process.poll()

        # store the contents of output, and close the file
        self.std_out = util.readOutput(self.outfile, options)
        self.outfile.close()

    def killCommand(self):
        """
        Kills any currently executing process started by the runCommand method.
        """
        if self.process is not None:
            try:
                if platform.system() == "Windows":
                    self.process.terminate()
                else:
                    pgid = os.getpgid(self.process.pid)
                    os.killpg(pgid, SIGTERM)
            except OSError: # Process already terminated
                pass

    def run(self, timer, options):
        """
        This is a method that is the tester's main execution code.  Subclasses can override this
        method with custom code relevant to their specific testing needs.  By default this method
        calls runCommand.  runCommand is provided as a helper for running (external) subprocesses
        as part of the tester's execution and should be the *only* way subprocesses are executed
        if needed. The run method is responsible to call the start+stop methods on timer to record
        the time taken to run the actual test.  start+stop can be called multiple times.
        """
        self.runCommand(timer, options)

    def processResultsCommand(self, moose_dir, options):
        """ method to return the commands (list) used for processing results """
        return []

    def processResults(self, moose_dir, options, output):
        """ method to process the results of a finished tester """
        return

    def hasRedirectedOutput(self, options):
        """ return bool on tester having redirected output """
        return (self.specs.isValid('redirect_output') and self.specs['redirect_output'] == True and self.getProcs(options) > 1)

    def getRedirectedOutputFiles(self, options):
        """ return a list of redirected output """
        # range (not Python-2-only xrange) keeps this working on both interpreters
        return [os.path.join(self.getTestDir(), self.name() + '.processor.{}'.format(p)) for p in range(self.getProcs(options))]

    def checkRunnableBase(self, options):
        """
        Method to check for caveats that would prevent this tester from
        executing correctly (or not at all).

        DO NOT override this method. Instead, see .checkRunnable()
        """
        reasons = {}
        checks = options._checks

        # If the something has already deemed this test a failure, return now
        if self.didFail():
            return False

        # If --dry-run set the test status to pass and DO NOT return.
        # This will allow additional checks to perform and report tests
        # that would normally be skipped (and return as False).
        if options.dry_run:
            self.success_message = 'DRY RUN'
            self.setStatus(self.success_message, self.bucket_success)

        # Check if we only want to run failed tests
        if options.failed_tests:
            if self.specs['test_name'] not in options._test_list:
                self.setStatus('not failed', self.bucket_silent)
                return False

        # Check if we only want to run syntax tests
        if options.check_input:
            if not self.specs['check_input']:
                self.setStatus('not check_input', self.bucket_silent)
                return False

        # Are we running only tests in a specific group?
        if options.group != 'ALL' and options.group not in self.specs['group']:
            self.setStatus('unmatched group', self.bucket_silent)
            return False
        if options.not_group != '' and options.not_group in self.specs['group']:
            self.setStatus('unmatched group', self.bucket_silent)
            return False

        # Store regexp for matching tests if --re is used
        if options.reg_exp:
            match_regexp = re.compile(options.reg_exp)

        # If --re then only test matching regexp. Needs to run before other SKIP methods
        # This also needs to be in its own bucket group. We normally print skipped messages.
        # But we do not want to print tests that didn't match regex.
        if options.reg_exp and not match_regexp.search(self.specs['test_name']):
            self.setStatus('silent', self.bucket_silent)
            return False

        # Short circuit method and run this test if we are ignoring all caveats
        if options.ignored_caveats == 'all':
            # Still, we should abide by the derived classes
            return self.checkRunnable(options)

        # Check for deleted tests
        if self.specs.isValid('deleted'):
            reasons['deleted'] = 'deleted (' + self.specs['deleted'] + ')'

        # Check for skipped tests
        if self.specs.type('skip') is bool and self.specs['skip']:
            # Backwards compatible (no reason)
            reasons['skip'] = 'no reason'
        elif self.specs.type('skip') is not bool and self.specs.isValid('skip'):
            reasons['skip'] = self.specs['skip']
        # If were testing for SCALE_REFINE, then only run tests with a SCALE_REFINE set
        elif (options.store_time or options.scaling) and self.specs['scale_refine'] == 0:
            self.setStatus('silent', self.bucket_silent)
            return False
        # If we're testing with valgrind, then skip tests that require parallel or threads or don't meet the valgrind setting
        elif options.valgrind_mode != '':
            tmp_reason = ''
            if self.specs['valgrind'].upper() == 'NONE':
                tmp_reason = 'Valgrind==NONE'
            elif self.specs['valgrind'].upper() == 'HEAVY' and options.valgrind_mode.upper() == 'NORMAL':
                tmp_reason = 'Valgrind==HEAVY'
            elif self.specs['min_parallel'] > 1 or self.specs['min_threads'] > 1:
                tmp_reason = 'Valgrind requires serial'
            if tmp_reason != '':
                reasons['valgrind'] = tmp_reason
        # If we're running in recover mode skip tests that have recover = false
        elif options.enable_recover and self.specs['recover'] == False:
            reasons['recover'] = 'NO RECOVER'

        # Check for PETSc versions
        (petsc_status, logic_reason, petsc_version) = util.checkPetscVersion(checks, self.specs)
        if not petsc_status:
            reasons['petsc_version'] = 'using PETSc ' + str(checks['petsc_version']) + ' REQ: ' + logic_reason + ' ' + petsc_version

        # Check for SLEPc versions
        (slepc_status, logic_reason, slepc_version) = util.checkSlepcVersion(checks, self.specs)
        if not slepc_status and len(self.specs['slepc_version']) != 0:
            if slepc_version != None:
                reasons['slepc_version'] = 'using SLEPc ' + str(checks['slepc_version']) + ' REQ: ' + logic_reason + ' ' + slepc_version
            elif slepc_version == None:
                reasons['slepc_version'] = 'SLEPc is not installed'

        # PETSc and SLEPc is being explicitly checked above
        local_checks = ['platform', 'compiler', 'mesh_mode', 'method', 'library_mode', 'dtk', 'unique_ids', 'vtk', 'tecplot', \
                        'petsc_debug', 'curl', 'tbb', 'superlu', 'cxx11', 'asio', 'unique_id', 'slepc', 'petsc_version_release', 'boost']
        for check in local_checks:
            test_platforms = set()
            operator_display = '!='
            inverse_set = False
            for x in self.specs[check]:
                if x[0] == '!':
                    if inverse_set:
                        reasons[check] = 'Multiple Negation Unsupported'
                    inverse_set = True
                    operator_display = '=='
                    x = x[1:] # Strip off the !
                x_upper = x.upper()
                if x_upper in test_platforms:
                    reasons[x_upper] = 'Duplicate Entry or Negative of Existing Entry'
                test_platforms.add(x.upper())

            match_found = len(test_platforms.intersection(checks[check])) > 0

            # Either we didn't find the match when we were using normal "include" logic
            # or we did find the match when we wanted to exclude it
            if inverse_set == match_found:
                reasons[check] = re.sub(r'\[|\]', '', check).upper() + operator_display + ', '.join(test_platforms)

        # Check for heavy tests
        if options.all_tests or options.heavy_tests:
            if not self.specs['heavy'] and options.heavy_tests:
                reasons['heavy'] = 'NOT HEAVY'
        elif self.specs['heavy']:
            reasons['heavy'] = 'HEAVY'

        # Check for positive scale refine values when using store timing options
        if self.specs['scale_refine'] == 0 and options.store_time:
            reasons['scale_refine'] = 'scale_refine==0 store_time=True'

        # There should only be one entry in self.specs['dof_id_bytes']
        for x in self.specs['dof_id_bytes']:
            if x != 'ALL' and x not in checks['dof_id_bytes']:
                reasons['dof_id_bytes'] = '--with-dof-id-bytes!=' + x

        # Check to make sure depend files exist
        for depend_file in self.specs['depend_files']:
            if not os.path.isfile(os.path.join(self.specs['base_dir'], depend_file)):
                reasons['depend_files'] = 'DEPEND FILES'

        # We calculate the exe_objects only if we need them
        if self.specs["required_objects"] and checks["exe_objects"] is None:
            checks["exe_objects"] = util.getExeObjects(self.specs["executable"])

        # Check to see if we have the required object names
        for var in self.specs['required_objects']:
            if var not in checks["exe_objects"]:
                reasons['required_objects'] = '%s not found in executable' % var
                break

        # Check to make sure required submodules are initialized
        for var in self.specs['required_submodule']:
            if var not in checks["submodules"]:
                reasons['required_submodule'] = '%s submodule not initialized' % var

        # Check to make sure environment variable exists
        for var in self.specs['env_vars']:
            if var not in os.environ:
                reasons['env_vars'] = 'ENV VAR NOT SET'

        # Check for display
        if self.specs['display_required'] and not os.getenv('DISPLAY', False):
            reasons['display_required'] = 'NO DISPLAY'

        # Check for queueing
        if (not self.specs['queue_scheduler'] or not self.shouldExecute()) \
           and options.queueing:
            reasons['queue_scheduler'] = 'queue not supported'

        # Remove any matching user supplied caveats from accumulated checkRunnable caveats that
        # would normally produce a skipped test.
        caveat_list = set()
        if options.ignored_caveats:
            caveat_list = set([x.lower() for x in options.ignored_caveats.split()])

        if len(set(reasons.keys()) - caveat_list) > 0:
            tmp_reason = []
            for key, value in reasons.items():
                if key.lower() not in caveat_list:
                    tmp_reason.append(value)

            # Format joined reason to better fit on the screen
            if len(', '.join(tmp_reason)) >= util.TERM_COLS - (len(self.specs['test_name'])+21):
                flat_reason = (', '.join(tmp_reason))[:(util.TERM_COLS - (len(self.specs['test_name'])+24))] + '...'
            else:
                flat_reason = ', '.join(tmp_reason)

            # If the test is deleted we still need to treat this differently
            if 'deleted' in reasons.keys():
                self.setStatus(flat_reason, self.bucket_deleted)
            else:
                self.setStatus(flat_reason, self.bucket_skip)
            return False

        # Check the return values of the derived classes
        self._runnable = self.checkRunnable(options)
        return self._runnable
| lgpl-2.1 |
paran0ids0ul/infernal-twin | build/pillow/build/lib.linux-i686-2.7/PIL/ImageMorph.py | 26 | 7978 | # A binary morphology add-on for the Python Imaging Library
#
# History:
# 2014-06-04 Initial version.
#
# Copyright (c) 2014 Dov Grobgeld <dov.grobgeld@gmail.com>
from PIL import Image
from PIL import _imagingmorph
import re
LUT_SIZE = 1 << 9  # 3x3 neighbourhood -> 9 bits -> 512 possible patterns


class LutBuilder(object):
    """A class for building a MorphLut from a descriptive language

    The input patterns is a list of a strings sequences like these::

        4:(...
           .1.
           111)->1

    (whitespaces including linebreaks are ignored). The option 4
    describes a series of symmetry operations (in this case a
    4-rotation), the pattern is described by:

    - . or X - Ignore
    - 1 - Pixel is on
    - 0 - Pixel is off

    The result of the operation is described after "->" string.

    The default is to return the current pixel value, which is
    returned if no other match is found.

    Operations:

    - 4 - 4 way rotation
    - N - Negate
    - 1 - Dummy op for no other operation (an op must always be given)
    - M - Mirroring

    Example::

        lb = LutBuilder(patterns = ["4:(... .1. 111)->1"])
        lut = lb.build_lut()
    """

    def __init__(self, patterns=None, op_name=None):
        """Initialize from explicit patterns or one of the built-in operator names."""
        if patterns is not None:
            self.patterns = patterns
        else:
            self.patterns = []
        self.lut = None
        if op_name is not None:
            known_patterns = {
                'corner': ['1:(... ... ...)->0',
                           '4:(00. 01. ...)->1'],
                'dilation4': ['4:(... .0. .1.)->1'],
                'dilation8': ['4:(... .0. .1.)->1',
                              '4:(... .0. ..1)->1'],
                'erosion4': ['4:(... .1. .0.)->0'],
                'erosion8': ['4:(... .1. .0.)->0',
                             '4:(... .1. ..0)->0'],
                'edge': ['1:(... ... ...)->0',
                         '4:(.0. .1. ...)->1',
                         '4:(01. .1. ...)->1']
            }
            if op_name not in known_patterns:
                raise Exception('Unknown pattern '+op_name+'!')

            self.patterns = known_patterns[op_name]

    def add_patterns(self, patterns):
        """Append additional pattern strings to the builder."""
        self.patterns += patterns

    def build_default_lut(self):
        """Initialize the lut so every entry returns the current (center) pixel."""
        symbols = [0, 1]
        m = 1 << 4  # pos of current pixel
        self.lut = bytearray([symbols[(i & m) > 0] for i in range(LUT_SIZE)])

    def get_lut(self):
        """Return the lut built so far (None until build_lut is called)."""
        return self.lut

    def _string_permute(self, pattern, permutation):
        """string_permute takes a pattern and a permutation and returns the
        string permuted according to the permutation list.
        """
        assert(len(permutation) == 9)
        return ''.join([pattern[p] for p in permutation])

    def _pattern_permute(self, basic_pattern, options, basic_result):
        """pattern_permute takes a basic pattern and its result and clones
        the pattern according to the modifications described in the $options
        parameter. It returns a list of all cloned patterns."""
        patterns = [(basic_pattern, basic_result)]

        # rotations
        if '4' in options:
            res = patterns[-1][1]
            for i in range(4):
                patterns.append(
                    (self._string_permute(patterns[-1][0], [6, 3, 0,
                                                            7, 4, 1,
                                                            8, 5, 2]), res))
        # mirror
        if 'M' in options:
            n = len(patterns)
            for pattern, res in patterns[0:n]:
                patterns.append(
                    (self._string_permute(pattern, [2, 1, 0,
                                                    5, 4, 3,
                                                    8, 7, 6]), res))

        # negate
        if 'N' in options:
            n = len(patterns)
            for pattern, res in patterns[0:n]:
                # Swap 0 and 1
                pattern = (pattern
                           .replace('0', 'Z')
                           .replace('1', '0')
                           .replace('Z', '1'))
                # Keep the result an int (not a '%d' string) so build_lut can
                # use it as a list index without raising TypeError.
                res = 1 - int(res)
                patterns.append((pattern, res))

        return patterns

    def build_lut(self):
        """Compile all patterns into a morphology lut.

        TBD :Build based on (file) morphlut:modify_lut
        """
        self.build_default_lut()
        patterns = []

        # Parse and create symmetries of the patterns strings
        for p in self.patterns:
            m = re.search(
                r'(\w*):?\s*\((.+?)\)\s*->\s*(\d)', p.replace('\n', ''))
            if not m:
                raise Exception('Syntax error in pattern "'+p+'"')
            options = m.group(1)
            pattern = m.group(2)
            result = int(m.group(3))

            # Get rid of spaces
            pattern = pattern.replace(' ', '').replace('\n', '')

            patterns += self._pattern_permute(pattern, options, result)

        # compile the patterns into regular expressions for speed
        for i in range(len(patterns)):
            p = patterns[i][0].replace('.', 'X').replace('X', '[01]')
            p = re.compile(p)
            patterns[i] = (p, patterns[i][1])

        # Step through table and find patterns that match.
        # Note that all the patterns are searched. The last one
        # caught overrides
        for i in range(LUT_SIZE):
            # Build the bit pattern
            bitpattern = bin(i)[2:]
            bitpattern = ('0'*(9-len(bitpattern)) + bitpattern)[::-1]

            for p, r in patterns:
                if p.match(bitpattern):
                    self.lut[i] = [0, 1][r]

        return self.lut
class MorphOp(object):
    """A binary morphological operator driven by a 512-entry look-up table."""

    def __init__(self,
                 lut=None,
                 op_name=None,
                 patterns=None):
        """Create a binary morphological operator.

        The lut may be given directly, or built from a named operator
        (op_name) or a list of pattern strings (patterns).
        """
        builder = None
        if op_name is not None:
            builder = LutBuilder(op_name=op_name)
        elif patterns is not None:
            builder = LutBuilder(patterns=patterns)
        self.lut = builder.build_lut() if builder is not None else lut

    def apply(self, image):
        """Run a single morphological operation on an image

        Returns a tuple of the number of changed pixels and the
        morphed image"""
        if self.lut is None:
            raise Exception('No operator loaded')

        result = Image.new(image.mode, image.size, None)
        changed = _imagingmorph.apply(
            bytes(self.lut), image.im.id, result.im.id)
        return changed, result

    def match(self, image):
        """Get a list of coordinates matching the morphological operation on
        an image.

        Returns a list of tuples of (x,y) coordinates
        of all matching pixels."""
        lut = self.lut
        if lut is None:
            raise Exception('No operator loaded')
        return _imagingmorph.match(bytes(lut), image.im.id)

    def get_on_pixels(self, image):
        """Get a list of all turned on pixels in a binary image

        Returns a list of tuples of (x,y) coordinates
        of all matching pixels."""
        return _imagingmorph.get_on_pixels(image.im.id)

    def load_lut(self, filename):
        """Load an operator from an mrl file"""
        with open(filename, 'rb') as f:
            raw = f.read()

        self.lut = bytearray(raw)
        if len(self.lut) != 8192:
            self.lut = None
            raise Exception('Wrong size operator file!')

    def save_lut(self, filename):
        """Save an operator to an mrl file"""
        if self.lut is None:
            raise Exception('No operator loaded')
        with open(filename, 'wb') as f:
            f.write(self.lut)

    def set_lut(self, lut):
        """Set the lut from an external source"""
        self.lut = lut
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pip/_internal/commands/search.py | 21 | 4728 | from __future__ import absolute_import
import logging
import sys
import textwrap
from collections import OrderedDict
from pip._vendor import pkg_resources
from pip._vendor.packaging.version import parse as parse_version
# NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is
# why we ignore the type on this import
from pip._vendor.six.moves import xmlrpc_client # type: ignore
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS
from pip._internal.download import PipXmlrpcTransport
from pip._internal.exceptions import CommandError
from pip._internal.models.index import PyPI
from pip._internal.utils.compat import get_terminal_size
from pip._internal.utils.logging import indent_log
logger = logging.getLogger(__name__)
class SearchCommand(Command):
    """Search for PyPI packages whose name or summary contains <query>."""
    name = 'search'
    usage = """
      %prog [options] <query>"""
    summary = 'Search PyPI for packages.'
    ignore_require_venv = True

    def __init__(self, *args, **kw):
        super(SearchCommand, self).__init__(*args, **kw)
        # Let the user point the search at an alternative index server.
        self.cmd_opts.add_option(
            '-i', '--index',
            dest='index',
            metavar='URL',
            default=PyPI.pypi_url,
            help='Base URL of Python Package Index (default %default)')
        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        """Query the index, group the hits per package, and print them."""
        if not args:
            raise CommandError('Missing required argument (search query).')
        raw_hits = self.search(args, options)
        grouped = transform_hits(raw_hits)

        # Only constrain the output width when attached to a real terminal.
        width = get_terminal_size()[0] if sys.stdout.isatty() else None
        print_results(grouped, terminal_width=width)

        return SUCCESS if raw_hits else NO_MATCHES_FOUND

    def search(self, query, options):
        """Run the XML-RPC 'search' call against the configured index."""
        index_url = options.index
        with self._build_session(options) as session:
            proxy = xmlrpc_client.ServerProxy(
                index_url, PipXmlrpcTransport(index_url, session))
            return proxy.search({'name': query, 'summary': query}, 'or')
def transform_hits(hits):
    """
    The list from pypi is really a list of versions. We want a list of
    packages with the list of versions stored inline. This converts the
    list from pypi into one we can use.

    Each output entry is {'name': ..., 'summary': ..., 'versions': [...]},
    in first-seen order; the summary tracks the highest version seen.
    """
    packages = OrderedDict()
    for hit in hits:
        name = hit['name']
        summary = hit['summary']
        version = hit['version']

        # Membership test on the dict itself, not on .keys()
        if name not in packages:
            packages[name] = {
                'name': name,
                'summary': summary,
                'versions': [version],
            }
        else:
            packages[name]['versions'].append(version)

            # if this is the highest version, replace summary and score
            if version == highest_version(packages[name]['versions']):
                packages[name]['summary'] = summary

    return list(packages.values())
def print_results(hits, name_column_width=None, terminal_width=None):
    """Log one line per hit -- ``name (version) - summary`` -- and, for
    packages installed locally, log the installed vs. latest version.

    hits: list of {'name', 'summary', 'versions'} dicts (transform_hits()).
    name_column_width: fixed width of the name column; computed from the
        widest "name (version)" cell (+4 padding) when None.
    terminal_width: wrap summaries to fit this width when given.
    """
    if not hits:
        return
    if name_column_width is None:
        name_column_width = max([
            len(hit['name']) + len(highest_version(hit.get('versions', ['-'])))
            for hit in hits
        ]) + 4
    installed_packages = [p.project_name for p in pkg_resources.working_set]
    for hit in hits:
        name = hit['name']
        summary = hit['summary'] or ''
        latest = highest_version(hit.get('versions', ['-']))
        if terminal_width is not None:
            target_width = terminal_width - name_column_width - 5
            if target_width > 10:
                # wrap and indent summary to fit terminal
                summary = textwrap.wrap(summary, target_width)
                summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
        line = '%-*s - %s' % (name_column_width,
                              '%s (%s)' % (name, latest), summary)
        try:
            logger.info(line)
            if name in installed_packages:
                dist = pkg_resources.get_distribution(name)
                with indent_log():
                    if dist.version == latest:
                        logger.info('INSTALLED: %s (latest)', dist.version)
                    else:
                        logger.info('INSTALLED: %s', dist.version)
                        logger.info('LATEST: %s', latest)
        # A hit may contain characters the active stdout encoding cannot
        # represent; skip that entry rather than abort the whole listing.
        except UnicodeEncodeError:
            pass
def highest_version(versions):
    """Return the highest version string in *versions*, ordered by
    ``parse_version`` rather than lexicographically.  On ties the first
    occurrence wins (``max`` semantics); raises ValueError when empty."""
    return max(versions, key=parse_version)
| gpl-3.0 |
cnplab/blockmon | daemon/core/host.py | 1 | 11963 | # Copyright (c) 2011, NEC Europe Ltd, Consorzio Nazionale
# Interuniversitario per le Telecomunicazioni, Institut
# Telecom/Telecom Bretagne, ETH Zuerich, INVEA-TECH a.s. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the names of NEC Europe Ltd, Consorzio Nazionale
# Interuniversitario per le Telecomunicazioni, Institut Telecom/Telecom
# Bretagne, ETH Zuerich, INVEA-TECH a.s. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
# HOLDERBE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
#
import xml.dom.minidom
import commands
import math
class HostSpecsManager:
"""\brief Retrieves hardware specs from the local host
"""
def __init__(self):
pass
def get_host_specs(self):
"""\brief Parses lshw output for the local host and populates a
HostSpecsInfo object with the information.
\return (\c HostSpecsInfo) The host's specs, or None if error
"""
(status, output) = commands.getstatusoutput('lshw -xml 2>/dev/null')
if (status):
print "host::get_host_specs: error while running lshw"
return None
dom = self.__get_DOM(output, False)
if (dom == None):
print "host::get_host_specs: error while getting DOM object"
return None
nodes = dom.getElementsByTagName("node")
n_cpus = 0
cpu_type = None
nics = []
nic_macs = []
for node in nodes:
if node.hasAttributes():
node_class = self.__get_text(node.attributes['class'].childNodes)
if node_class == "network":
nic = self.__parse_lshw_nic(node)
if nic != None and nic.get_mac() not in nic_macs:
nics.append(nic)
nic_macs.append(nic.get_mac())
cpu_type = self.__get_cpu_type()
n_cpus = self.__get_n_cpus()
cores_per_cpu = self.__get_cores_per_cpu()
memory = self.__get_sys_memory()
return HostSpecsInfo(nics, cpu_type, n_cpus, cores_per_cpu, memory)
def __get_cpu_type(self):
"""\brief Gets the host's CPU type
\return (\c string) The CPU type
"""
(status, output) = commands.getstatusoutput('cat /proc/cpuinfo 2>/dev/null')
if (status):
print "host::__get_cores_per_cpu: error while getting cpuinfo"
return None
# e.g., model name: Intel(R) Core(TM)2 Duo CPU T8300 @ 2.40GHz
for line in output.splitlines():
if line.find("model name") != -1:
return line.split(":")[1].strip()
def __get_n_cpus(self):
"""\brief Gets the number of CPUs on the host
\return (\c int) The number of CPUs
"""
cmd = 'cat /proc/cpuinfo | grep processor | wc -l'
(status, output) = commands.getstatusoutput(cmd)
if (status):
print "host::__get_num_cores: error while getting cpuinfo"
return None
return int(output)
def __get_cores_per_cpu(self):
"""\brief Gets the number of cores per cpu
\return (\c int) The number of cores per cpu
"""
(status, output) = commands.getstatusoutput('cat /proc/cpuinfo 2>/dev/null')
if (status):
print "host::__get_cores_per_cpu: error while getting cpuinfo"
return None
# e.g., cpu cores: 2
for line in output.splitlines():
if line.find("cpu cores") != -1:
return int(line.split(":")[1].strip())
def __get_sys_memory(self):
"""\brief Gets the total system memory in GB
\return (\c int) The total system memory in GB
"""
(status, output) = commands.getstatusoutput('cat /proc/meminfo 2>/dev/null')
if (status):
print "host::__get_cores_per_cpu: error while getting cpuinfo"
return None
# e.g., MemTotal: 2018656 kB
for line in output.splitlines():
if line.find("MemTotal") != -1:
l = line.split(":")[1].strip()
return int(math.ceil(float(l[:len(l) - 3]) / float(1048576)))
def __parse_lshw_nic(self, node):
"""\brief Parses an xml node to see whether it is an interface. If
is, it returns an Interface object, otherwise None is returned
\param node (\c minidom.Node) The xml node
\return (\c Interface) The parsed interface, or None if the node was not an interface
"""
try: model = self.__get_text(node.getElementsByTagName("product")[0].childNodes)
except IndexError: model = ""
try: mac = self.__get_text(node.getElementsByTagName("serial")[0].childNodes)
except IndexError: mac = ""
try: name = self.__get_text(node.getElementsByTagName("logicalname")[0].childNodes)
except IndexError: name = ""
# dirty hack
speed = 1000
if model.find("10 Gigabit") != 0:
speed *= 10
return NICInfo(model, speed, mac, name)
def __get_text(self, nodelist):
"""\brief Concatenates text data from a list of xml nodes
\param nodelist (\c minidom.Node[]) The nodes
\return (\c string) The concatenated text
"""
text = ""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
text += node.data
return text
def __get_DOM(self, desc, file=True):
"""\brief Turns an xml file into a DOM object. If the file parameter is set to
true, desc should be the path to the xml file to read. Otherwise, desc
is a string containing xml to turn into a DOM object.
\param desc (\c string) Path to an xml file or a string containing xml
\param file (\c bool) Whether desc is a file or an xml string (default is true)
\return (\c xml.dom.minidom.Document) The DOM object
"""
dom = None
try:
if file:
dom = xml.dom.minidom.parse(desc)
else:
dom = xml.dom.minidom.parseString(desc)
except Exception, e:
print "Error getting dom " + str(e)
return None
return dom
def __get_label(self, key, xml_object):
"""\brief Given an xml object and a key, returns the value matching that key
(a string) or None if nothing matches the key.
\param key (\c string) The key to search for
\param xml_object (\c minidom.Node) The xml object to search for the key in
\return (\c string) The value found or None if no value was found for the given key
"""
if xml_object.attributes.has_key(key):
return xml_object.attributes[key].value
else:
return None
class HostSpecsInfo(object):
    """\brief Immutable container describing a computer's hardware specs
    """
    def __init__(self, nics, cpu_type, n_cpus, cores_per_cpu, memory):
        """\brief Initializes class
        \param nics (\c list[NICInfo]) The computer's network interfaces
        \param cpu_type (\c string) The CPU type (e.g., Intel Xeon 5655)
        \param n_cpus (\c int) The number of cpus
        \param cores_per_cpu (\c int) The number of cores per cpu
        \param memory (\c int) The amount of memory in GB
        """
        self.__nics = nics
        self.__cpu_type = cpu_type
        self.__n_cpus = n_cpus
        self.__cores_per_cpu = cores_per_cpu
        self.__memory = memory

    def get_nics(self):
        return self.__nics

    def get_cpu_type(self):
        return self.__cpu_type

    def get_n_cpus(self):
        return self.__n_cpus

    def get_cores_per_cpu(self):
        return self.__cores_per_cpu

    def get_memory(self):
        return self.__memory

    def __str__(self):
        # One header line followed by one line per NIC.
        header = "HostSpecsInfo: cpu_type=%s,n_cpus=%s,cores_per_cpu=%s,memory=%s\n" % (
            self.get_cpu_type(), self.get_n_cpus(),
            self.get_cores_per_cpu(), self.get_memory())
        nic_lines = [str(nic) + "\n" for nic in self.get_nics()]
        return header + "".join(nic_lines)
class NICInfo(object):
    """\brief Container class for a single NIC's information
    """
    def __init__(self, model, speed, mac, name, traffic_info=None):
        """\brief Stores the NIC's description
        \param model (\c string) The NIC's model (e.g., Intel 82571EB)
        \param speed (\c int) The NIC's speed in Mbits/s (e.g., 10000)
        \param mac (\c string) The NIC's mac address
        \param name (\c string) The NIC's name (e.g., "eth0")
        \param traffic_info (\c TrafficInfo) Which traffic the NIC sees
        """
        self.__model = model
        self.__speed = speed
        self.__mac = mac
        self.__name = name
        self.__traffic_info = traffic_info

    def get_model(self):
        return self.__model

    def get_speed(self):
        return self.__speed

    def get_mac(self):
        return self.__mac

    def get_name(self):
        return self.__name

    def get_traffic_info(self):
        return self.__traffic_info

    def set_traffic_info(self, traffic_info):
        # traffic info is discovered later, hence the only mutator
        self.__traffic_info = traffic_info

    def __str__(self):
        return "NICInfo: model=%s,speed=%s,mac=%s,name=%s,traffic_info=%s" % (
            self.get_model(), self.get_speed(), self.get_mac(),
            self.get_name(), self.get_traffic_info())
class TrafficInfo(object):
    """\brief Describes the traffic that a NIC sees (prefix + direction)
    """
    # "in" means in from the Internet, unk=unknown
    DIRECTION = ["in", "out", "bi", "unk"]

    def __init__(self, prefix, direction):
        """\brief Stores the traffic description
        \param prefix (\c string) The prefix the NIC sees (e.g., 128.16.6/24)
        \param direction (\c TrafficInfo.DIRECTION) Which direction the traffic's flowing
        """
        self.__prefix = prefix
        self.__direction = direction

    def get_prefix(self):
        return self.__prefix

    def get_direction(self):
        return self.__direction

    def __str__(self):
        return "TrafficInfo: prefix=%s,direction=%s" % (
            self.get_prefix(), self.get_direction())
| bsd-3-clause |
Shanec132006/project | lib/werkzeug/exceptions.py | 316 | 17799 | # -*- coding: utf-8 -*-
"""
werkzeug.exceptions
~~~~~~~~~~~~~~~~~~~
This module implements a number of Python exceptions you can raise from
within your views to trigger a standard non-200 response.
Usage Example
-------------
::
from werkzeug.wrappers import BaseRequest
from werkzeug.wsgi import responder
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except HTTPException as e:
return e
As you can see from this example those exceptions are callable WSGI
applications. Because of Python 2.4 compatibility those do not extend
from the response objects but only from the python exception class.
As a matter of fact they are not Werkzeug response objects. However you
can get a response object by calling ``get_response()`` on a HTTP
exception.
Keep in mind that you have to pass an environment to ``get_response()``
because some errors fetch additional information from the WSGI
environment.
If you want to hook in a different exception page to say, a 404 status
code, you can add a second except for a specific subclass of an error::
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except NotFound, e:
return not_found(request)
except HTTPException, e:
return e
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
# Because of bootstrapping reasons we need to manually patch ourselves
# onto our parent module.
import werkzeug
werkzeug.exceptions = sys.modules[__name__]
from werkzeug._internal import _get_environ
from werkzeug._compat import iteritems, integer_types, text_type, \
implements_to_string
from werkzeug.wrappers import Response
@implements_to_string
class HTTPException(Exception):
    """
    Baseclass for all HTTP exceptions.  This exception can be called as WSGI
    application to render a default error page or you can catch the subclasses
    of it independently and render nicer error messages.
    """

    # HTTP status code; None on the base class, set by each subclass.
    code = None
    # Default human-readable description; may be overridden per instance.
    description = None

    def __init__(self, description=None, response=None):
        Exception.__init__(self)
        if description is not None:
            self.description = description
        # Optional pre-built response; get_response() returns it verbatim.
        self.response = response

    @classmethod
    def wrap(cls, exception, name=None):
        """This method returns a new subclass of the exception provided that
        also is a subclass of `BadRequest`.
        """
        class newcls(cls, exception):
            def __init__(self, arg=None, *args, **kwargs):
                cls.__init__(self, *args, **kwargs)
                exception.__init__(self, arg)
        # Make the generated class appear to live in the caller's module.
        newcls.__module__ = sys._getframe(1).f_globals.get('__name__')
        newcls.__name__ = name or cls.__name__ + exception.__name__
        return newcls

    @property
    def name(self):
        """The status name (reason phrase) for this exception's code."""
        return HTTP_STATUS_CODES.get(self.code, 'Unknown Error')

    def get_description(self, environ=None):
        """Get the description as an HTML-escaped paragraph."""
        return u'<p>%s</p>' % escape(self.description)

    def get_body(self, environ=None):
        """Get the HTML body of the default error page."""
        return text_type((
            u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
            u'<title>%(code)s %(name)s</title>\n'
            u'<h1>%(name)s</h1>\n'
            u'%(description)s\n'
        ) % {
            'code': self.code,
            'name': escape(self.name),
            'description': self.get_description(environ)
        })

    def get_headers(self, environ=None):
        """Get a list of headers for the response."""
        return [('Content-Type', 'text/html')]

    def get_response(self, environ=None):
        """Get a response object.  If one was passed to the exception
        it's returned directly.

        :param environ: the optional environ for the request.  This
                        can be used to modify the response depending
                        on how the request looked like.
        :return: a :class:`Response` object or a subclass thereof.
        """
        if self.response is not None:
            return self.response
        if environ is not None:
            environ = _get_environ(environ)
        headers = self.get_headers(environ)
        return Response(self.get_body(environ), self.code, headers)

    def __call__(self, environ, start_response):
        """Call the exception as WSGI application.

        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        """
        response = self.get_response(environ)
        return response(environ, start_response)

    def __str__(self):
        return '%d: %s' % (self.code, self.name)

    def __repr__(self):
        return '<%s \'%s\'>' % (self.__class__.__name__, self)
class BadRequest(HTTPException):
    """*400* `Bad Request`

    Raise if the browser sends something to the application the application
    or server cannot handle.
    """
    code = 400
    description = (
        'The browser (or proxy) sent a request that this server could '
        'not understand.'
    )
class ClientDisconnected(BadRequest):
    """Internal exception that is raised if Werkzeug detects a disconnected
    client.  Since the client is already gone at that point attempting to
    send the error message to the client might not work and might ultimately
    result in another exception in the server.  Mainly this is here so that
    it is silenced by default as far as Werkzeug is concerned.

    Since disconnections cannot be reliably detected and are unspecified
    by WSGI to a large extend this might or might not be raised if a client
    is gone.

    .. versionadded:: 0.8
    """
class SecurityError(BadRequest):
    """Raised if something triggers a security error.  This is otherwise
    exactly like a bad request error.

    .. versionadded:: 0.9
    """
class Unauthorized(HTTPException):
    """*401* `Unauthorized`

    Raise if the user is not authorized.  Also used if you want to use HTTP
    basic auth.
    """
    code = 401
    description = (
        'The server could not verify that you are authorized to access '
        'the URL requested. You either supplied the wrong credentials (e.g. '
        'a bad password), or your browser doesn\'t understand how to supply '
        'the credentials required.'
    )
class Forbidden(HTTPException):
    """*403* `Forbidden`

    Raise if the user doesn't have the permission for the requested resource
    but was authenticated.
    """
    code = 403
    description = (
        'You don\'t have the permission to access the requested resource. '
        'It is either read-protected or not readable by the server.'
    )
class NotFound(HTTPException):
    """*404* `Not Found`

    Raise if a resource does not exist and never existed.
    """
    code = 404
    description = (
        'The requested URL was not found on the server. '
        'If you entered the URL manually please check your spelling and '
        'try again.'
    )
class MethodNotAllowed(HTTPException):
    """*405* `Method Not Allowed`

    Raise if the server used a method the resource does not handle.  For
    example `POST` if the resource is view only.  Especially useful for REST.

    The first argument for this exception should be a list of allowed methods.
    Strictly speaking the response would be invalid if you don't provide valid
    methods in the header which you can do with that list.
    """
    code = 405
    description = 'The method is not allowed for the requested URL.'

    def __init__(self, valid_methods=None, description=None):
        """Takes an optional list of valid http methods
        starting with werkzeug 0.3 the list will be mandatory."""
        HTTPException.__init__(self, description)
        self.valid_methods = valid_methods

    def get_headers(self, environ):
        # Advertise the permitted methods via the ``Allow`` response header.
        headers = HTTPException.get_headers(self, environ)
        if self.valid_methods:
            headers.append(('Allow', ', '.join(self.valid_methods)))
        return headers
class NotAcceptable(HTTPException):
    """*406* `Not Acceptable`

    Raise if the server can't return any content conforming to the
    `Accept` headers of the client.
    """
    code = 406
    description = (
        'The resource identified by the request is only capable of '
        'generating response entities which have content characteristics '
        'not acceptable according to the accept headers sent in the '
        'request.'
    )
class RequestTimeout(HTTPException):
    """*408* `Request Timeout`

    Raise to signalize a timeout.
    """
    code = 408
    description = (
        'The server closed the network connection because the browser '
        'didn\'t finish the request within the specified time.'
    )
class Conflict(HTTPException):
    """*409* `Conflict`

    Raise to signal that a request cannot be completed because it conflicts
    with the current state on the server.

    .. versionadded:: 0.7
    """
    code = 409
    description = (
        'A conflict happened while processing the request. The resource '
        'might have been modified while the request was being processed.'
    )
class Gone(HTTPException):
    """*410* `Gone`

    Raise if a resource existed previously and went away without new location.
    """
    code = 410
    description = (
        'The requested URL is no longer available on this server and '
        'there is no forwarding address.</p><p>If you followed a link '
        'from a foreign page, please contact the author of this page.'
    )
class LengthRequired(HTTPException):
    """*411* `Length Required`

    Raise if the browser submitted data but no ``Content-Length`` header which
    is required for the kind of processing the server does.
    """
    code = 411
    description = (
        'A request with this method requires a valid <code>Content-'
        'Length</code> header.'
    )
class PreconditionFailed(HTTPException):
    """*412* `Precondition Failed`

    Status code used in combination with ``If-Match``, ``If-None-Match``, or
    ``If-Unmodified-Since``.
    """
    code = 412
    description = (
        'The precondition on the request for the URL failed positive '
        'evaluation.'
    )
class RequestEntityTooLarge(HTTPException):
    """*413* `Request Entity Too Large`

    The status code one should return if the data submitted exceeded a given
    limit.
    """
    code = 413
    description = (
        'The data value transmitted exceeds the capacity limit.'
    )
class RequestURITooLarge(HTTPException):
    """*414* `Request URI Too Large`

    Like *413* but for too long URLs.
    """
    code = 414
    description = (
        'The length of the requested URL exceeds the capacity limit '
        'for this server. The request cannot be processed.'
    )
class UnsupportedMediaType(HTTPException):
    """*415* `Unsupported Media Type`

    The status code returned if the server is unable to handle the media type
    the client transmitted.
    """
    code = 415
    description = (
        'The server does not support the media type transmitted in '
        'the request.'
    )
class RequestedRangeNotSatisfiable(HTTPException):
    """*416* `Requested Range Not Satisfiable`

    The client asked for a part of the file that lies beyond the end
    of the file.

    .. versionadded:: 0.7
    """
    code = 416
    description = (
        'The server cannot provide the requested range.'
    )
class ExpectationFailed(HTTPException):
    """*417* `Expectation Failed`

    The server cannot meet the requirements of the Expect request-header.

    .. versionadded:: 0.7
    """
    code = 417
    description = (
        'The server could not meet the requirements of the Expect header'
    )
class ImATeapot(HTTPException):
    """*418* `I'm a teapot`

    The server should return this if it is a teapot and someone attempted
    to brew coffee with it.

    .. versionadded:: 0.7
    """
    code = 418
    description = (
        'This server is a teapot, not a coffee machine'
    )
class UnprocessableEntity(HTTPException):
    """*422* `Unprocessable Entity`

    Used if the request is well formed, but the instructions are otherwise
    incorrect.
    """
    code = 422
    description = (
        'The request was well-formed but was unable to be followed '
        'due to semantic errors.'
    )
class PreconditionRequired(HTTPException):
    """*428* `Precondition Required`

    The server requires this request to be conditional, typically to prevent
    the lost update problem, which is a race condition between two or more
    clients attempting to update a resource through PUT or DELETE. By requiring
    each client to include a conditional header ("If-Match" or "If-Unmodified-
    Since") with the proper value retained from a recent GET request, the
    server ensures that each client has at least seen the previous revision of
    the resource.
    """
    code = 428
    description = (
        'This request is required to be conditional; try using "If-Match" '
        'or "If-Unmodified-Since".'
    )
class TooManyRequests(HTTPException):
    """*429* `Too Many Requests`

    The server is limiting the rate at which this user receives responses, and
    this request exceeds that rate. (The server may use any convenient method
    to identify users and their request rates). The server may include a
    "Retry-After" header to indicate how long the user should wait before
    retrying.
    """
    code = 429
    description = (
        'This user has exceeded an allotted request count. Try again later.'
    )
class RequestHeaderFieldsTooLarge(HTTPException):
    """*431* `Request Header Fields Too Large`

    The server refuses to process the request because the header fields are too
    large. One or more individual fields may be too large, or the set of all
    headers is too large.
    """
    code = 431
    description = (
        'One or more header fields exceeds the maximum size.'
    )
class InternalServerError(HTTPException):
    """*500* `Internal Server Error`

    Raise if an internal server error occurred.  This is a good fallback if an
    unknown error occurred in the dispatcher.
    """
    code = 500
    description = (
        'The server encountered an internal error and was unable to '
        'complete your request. Either the server is overloaded or there '
        'is an error in the application.'
    )
# NOTE: deliberately shadows the ``NotImplemented`` builtin inside this module.
class NotImplemented(HTTPException):
    """*501* `Not Implemented`

    Raise if the application does not support the action requested by the
    browser.
    """
    code = 501
    description = (
        'The server does not support the action requested by the '
        'browser.'
    )
class BadGateway(HTTPException):
    """*502* `Bad Gateway`

    If you do proxying in your application you should return this status code
    if you received an invalid response from the upstream server it accessed
    in attempting to fulfill the request.
    """
    code = 502
    description = (
        'The proxy server received an invalid response from an upstream '
        'server.'
    )
class ServiceUnavailable(HTTPException):
    """*503* `Service Unavailable`

    Status code you should return if a service is temporarily unavailable.
    """
    code = 503
    description = (
        'The server is temporarily unable to service your request due to '
        'maintenance downtime or capacity problems. Please try again '
        'later.'
    )
# Populated below by _find_exceptions(): maps status code -> exception class.
default_exceptions = {}
__all__ = ['HTTPException']
def _find_exceptions():
    """Register every exception class in this module that defines a status
    ``code`` in ``default_exceptions`` and export it through ``__all__``."""
    for name, obj in iteritems(globals()):
        try:
            if getattr(obj, 'code', None) is not None:
                default_exceptions[obj.code] = obj
                __all__.append(obj.__name__)
        # non-class globals without attribute access semantics are skipped
        except TypeError:  # pragma: no cover
            continue
# Build the registry once at import time, then drop the helper from the module.
_find_exceptions()
del _find_exceptions
class Aborter(object):
    """Callable that raises HTTP exceptions by status code.

    Built from a ``code -> exception`` mapping.  Calling the instance with
    an integer looks the code up in the mapping and raises the matching
    exception, forwarding any extra arguments to its constructor.  Calling
    it with a non-integer (e.g. a WSGI application) raises it wrapped in a
    proxy :class:`HTTPException`.
    """
    def __init__(self, mapping=None, extra=None):
        combined = dict(default_exceptions if mapping is None else mapping)
        if extra is not None:
            combined.update(extra)
        self.mapping = combined

    def __call__(self, code, *args, **kwargs):
        # Non-integer codes with no extra arguments act as response proxies.
        if not args and not kwargs and not isinstance(code, integer_types):
            raise HTTPException(response=code)
        if code not in self.mapping:
            raise LookupError('no exception for %r' % code)
        raise self.mapping[code](*args, **kwargs)
# Module-level convenience instance using the default code -> exception map.
abort = Aborter()

#: an exception that is used internally to signal both a key error and a
#: bad request.  Used by a lot of the datastructures.
BadRequestKeyError = BadRequest.wrap(KeyError)

# imported here because of circular dependencies of werkzeug.utils
from werkzeug.utils import escape
from werkzeug.http import HTTP_STATUS_CODES
| apache-2.0 |
Lh4cKg/sl4a | python/src/Lib/zipfile.py | 52 | 52856 | """
Read and write ZIP files.
"""
import struct, os, time, sys, shutil
import binascii, cStringIO, stat
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
# Public API of this module.
__all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED", "is_zipfile",
           "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
class BadZipfile(Exception):
    """Raised when a file is not a valid ZIP archive or its data is corrupt."""
    pass
class LargeZipFile(Exception):
    """
    Raised when writing a zipfile, the zipfile requires ZIP64 extensions
    and those extensions are disabled.
    """
error = BadZipfile      # The exception raised by this module

# Thresholds beyond which the ZIP64 extensions are required.
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = 1 << 16
ZIP_MAX_COMMENT = (1 << 16) - 1

# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
# Other ZIP compression methods not supported

# Below are some formats and associated data for reading/writing headers using
# the struct module.  The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
#   http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)

# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = "<4s4H2LH"
stringEndArchive = "PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)

_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9

# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = "PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)

# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18

# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = "PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)

_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11

# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = "PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)

# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = "PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)

_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def is_zipfile(filename):
    """Quickly see if file is a ZIP file by checking the magic number.

    Returns True when an "End of Central Directory" record is found,
    False on any IOError or when no record is present.
    """
    try:
        fpin = open(filename, "rb")
        # BUGFIX: close the file even if _EndRecData raises; the old code
        # leaked the handle whenever reading/seeking the file failed.
        try:
            endrec = _EndRecData(fpin)
        finally:
            fpin.close()
        if endrec:
            return True             # file has correct magic number
    except IOError:
        pass
    return False
def _EndRecData64(fpin, offset, endrec):
    """
    Read the ZIP64 end-of-archive records and use that to update endrec

    fpin: open file object; offset: negative position of the end-of-central-
    directory record relative to the end of the file; endrec: the list built
    by _EndRecData, updated in place and returned.
    """
    # The ZIP64 locator, if present, sits immediately before the EOCD record.
    fpin.seek(offset - sizeEndCentDir64Locator, 2)
    data = fpin.read(sizeEndCentDir64Locator)
    sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
    if sig != stringEndArchive64Locator:
        # No ZIP64 locator: plain archive, the original endrec stands.
        return endrec

    if diskno != 0 or disks != 1:
        raise BadZipfile("zipfiles that span multiple disks are not supported")

    # Assume no 'zip64 extensible data'
    fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
    data = fpin.read(sizeEndCentDir64)
    sig, sz, create_version, read_version, disk_num, disk_dir, \
            dircount, dircount2, dirsize, diroffset = \
            struct.unpack(structEndArchive64, data)
    if sig != stringEndArchive64:
        return endrec

    # Update the original endrec using data from the ZIP64 record
    endrec[_ECD_SIGNATURE] = sig
    endrec[_ECD_DISK_NUMBER] = disk_num
    endrec[_ECD_DISK_START] = disk_dir
    endrec[_ECD_ENTRIES_THIS_DISK] = dircount
    endrec[_ECD_ENTRIES_TOTAL] = dircount2
    endrec[_ECD_SIZE] = dirsize
    endrec[_ECD_OFFSET] = diroffset
    return endrec
def _EndRecData(fpin):
    """Return data from the "End of Central Directory" record, or None.

    The data is a list of the nine items in the ZIP "End of central dir"
    record followed by a tenth item, the file seek offset of this record."""

    # Determine file size
    fpin.seek(0, 2)
    filesize = fpin.tell()

    # Check to see if this is ZIP file with no archive comment (the
    # "end of central directory" structure should be the last item in the
    # file if this is the case).
    fpin.seek(-sizeEndCentDir, 2)
    data = fpin.read()
    if data[0:4] == stringEndArchive and data[-2:] == "\000\000":
        # the signature is correct and there's no comment, unpack structure
        endrec = struct.unpack(structEndArchive, data)
        endrec=list(endrec)

        # Append a blank comment and record start offset
        endrec.append("")
        endrec.append(filesize - sizeEndCentDir)

        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, -sizeEndCentDir, endrec)

    # Either this is not a ZIP file, or it is a ZIP file with an archive
    # comment.  Search the end of the file for the "end of central directory"
    # record signature. The comment is the last item in the ZIP file and may be
    # up to 64K long.  It is assumed that the "end of central directory" magic
    # number does not appear in the comment.
    maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
    fpin.seek(maxCommentStart, 0)
    data = fpin.read()
    start = data.rfind(stringEndArchive)
    if start >= 0:
        # found the magic number; attempt to unpack and interpret
        recData = data[start:start+sizeEndCentDir]
        endrec = list(struct.unpack(structEndArchive, recData))
        comment = data[start+sizeEndCentDir:]
        # check that comment length is correct
        if endrec[_ECD_COMMENT_SIZE] == len(comment):
            # Append the archive comment and start offset
            endrec.append(comment)
            endrec.append(maxCommentStart + start)

            # Try to read the "Zip64 end of central directory" structure
            return _EndRecData64(fpin, maxCommentStart + start - filesize,
                                 endrec)

    # Unable to find a valid end of central directory structure
    return
class ZipInfo (object):
    """Class with attributes describing each file in the ZIP archive."""

    # __slots__ keeps per-entry memory small: archives can contain many
    # thousands of members, each represented by one ZipInfo.
    __slots__ = (
        'orig_filename',
        'filename',
        'date_time',
        'compress_type',
        'comment',
        'extra',
        'create_system',
        'create_version',
        'extract_version',
        'reserved',
        'flag_bits',
        'volume',
        'internal_attr',
        'external_attr',
        'header_offset',
        'CRC',
        'compress_size',
        'file_size',
        '_raw_time',
    )

    def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
        self.orig_filename = filename   # Original file name in archive

        # Terminate the file name at the first null byte.  Null bytes in file
        # names are used as tricks by viruses in archives.
        null_byte = filename.find(chr(0))
        if null_byte >= 0:
            filename = filename[0:null_byte]
        # This is used to ensure paths in generated ZIP files always use
        # forward slashes as the directory separator, as required by the
        # ZIP format specification.
        if os.sep != "/" and os.sep in filename:
            filename = filename.replace(os.sep, "/")

        self.filename = filename        # Normalized file name
        self.date_time = date_time      # year, month, day, hour, min, sec
        # Standard values:
        self.compress_type = ZIP_STORED # Type of compression for the file
        self.comment = ""               # Comment for each file
        self.extra = ""                 # ZIP extra data
        if sys.platform == 'win32':
            self.create_system = 0          # System which created ZIP archive
        else:
            # Assume everything else is unix-y
            self.create_system = 3          # System which created ZIP archive
        self.create_version = 20        # Version which created ZIP archive
        self.extract_version = 20       # Version needed to extract archive
        self.reserved = 0               # Must be zero
        self.flag_bits = 0              # ZIP flag bits
        self.volume = 0                 # Volume number of file header
        self.internal_attr = 0          # Internal attributes
        self.external_attr = 0          # External file attributes
        # Other attributes are set by class ZipFile:
        # header_offset         Byte offset to the file header
        # CRC                   CRC-32 of the uncompressed file
        # compress_size         Size of the compressed file
        # file_size             Size of the uncompressed file

    def FileHeader(self):
        """Return the per-file header as a string."""
        # Pack date/time into the two 16-bit MS-DOS fields (2-second
        # resolution for seconds, years offset from 1980).
        dt = self.date_time
        dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
        dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
        if self.flag_bits & 0x08:
            # Set these to zero because we write them after the file data
            CRC = compress_size = file_size = 0
        else:
            CRC = self.CRC
            compress_size = self.compress_size
            file_size = self.file_size

        extra = self.extra

        if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
            # File is larger than what fits into a 4 byte integer,
            # fall back to the ZIP64 extension
            fmt = '<HHQQ'
            extra = extra + struct.pack(fmt,
                    1, struct.calcsize(fmt)-4, file_size, compress_size)
            # 0xffffffff in the fixed header signals "see ZIP64 extra field".
            file_size = 0xffffffff
            compress_size = 0xffffffff
            self.extract_version = max(45, self.extract_version)
            # NOTE(review): mirrors CPython 2.x, but arguably should read
            # max(45, self.create_version) — confirm before changing.
            self.create_version = max(45, self.extract_version)

        filename, flag_bits = self._encodeFilenameFlags()
        header = struct.pack(structFileHeader, stringFileHeader,
                 self.extract_version, self.reserved, flag_bits,
                 self.compress_type, dostime, dosdate, CRC,
                 compress_size, file_size,
                 len(filename), len(extra))
        return header + filename + extra

    def _encodeFilenameFlags(self):
        # Unicode names that are pure ASCII stay as-is; otherwise encode as
        # UTF-8 and set general-purpose bit 11 (the UTF-8 filename flag).
        if isinstance(self.filename, unicode):
            try:
                return self.filename.encode('ascii'), self.flag_bits
            except UnicodeEncodeError:
                return self.filename.encode('utf-8'), self.flag_bits | 0x800
        else:
            return self.filename, self.flag_bits

    def _decodeFilename(self):
        # Inverse of _encodeFilenameFlags: bit 11 marks a UTF-8 name.
        if self.flag_bits & 0x800:
            return self.filename.decode('utf-8')
        else:
            return self.filename

    def _decodeExtra(self):
        # Try to decode the extra field.
        # Walks the (tag, length, data) records; tag 1 is the ZIP64 record,
        # whose payload replaces any 0xffffffff sentinel values set in the
        # fixed-size central directory fields.
        extra = self.extra
        unpack = struct.unpack
        while extra:
            tp, ln = unpack('<HH', extra[:4])
            if tp == 1:
                if ln >= 24:
                    counts = unpack('<QQQ', extra[4:28])
                elif ln == 16:
                    counts = unpack('<QQ', extra[4:20])
                elif ln == 8:
                    counts = unpack('<Q', extra[4:12])
                elif ln == 0:
                    counts = ()
                else:
                    raise RuntimeError, "Corrupt extra field %s"%(ln,)

                idx = 0

                # ZIP64 extension (large files and/or large archives)
                if self.file_size in (0xffffffffffffffffL, 0xffffffffL):
                    self.file_size = counts[idx]
                    idx += 1

                if self.compress_size == 0xFFFFFFFFL:
                    self.compress_size = counts[idx]
                    idx += 1

                if self.header_offset == 0xffffffffL:
                    old = self.header_offset
                    self.header_offset = counts[idx]
                    idx+=1

            extra = extra[ln+4:]
class _ZipDecrypter:
"""Class to handle decryption of files stored within a ZIP archive.
ZIP supports a password-based form of encryption. Even though known
plaintext attacks have been found against it, it is still useful
to be able to get data out of such a file.
Usage:
zd = _ZipDecrypter(mypwd)
plain_char = zd(cypher_char)
plain_text = map(zd, cypher_text)
"""
def _GenerateCRCTable():
"""Generate a CRC-32 table.
ZIP encryption uses the CRC32 one-byte primitive for scrambling some
internal keys. We noticed that a direct implementation is faster than
relying on binascii.crc32().
"""
poly = 0xedb88320
table = [0] * 256
for i in range(256):
crc = i
for j in range(8):
if crc & 1:
crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
else:
crc = ((crc >> 1) & 0x7FFFFFFF)
table[i] = crc
return table
crctable = _GenerateCRCTable()
def _crc32(self, ch, crc):
"""Compute the CRC32 primitive on one byte."""
return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ord(ch)) & 0xff]
def __init__(self, pwd):
self.key0 = 305419896
self.key1 = 591751049
self.key2 = 878082192
for p in pwd:
self._UpdateKeys(p)
def _UpdateKeys(self, c):
self.key0 = self._crc32(c, self.key0)
self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
self.key1 = (self.key1 * 134775813 + 1) & 4294967295
self.key2 = self._crc32(chr((self.key1 >> 24) & 255), self.key2)
def __call__(self, c):
"""Decrypt a single character."""
c = ord(c)
k = self.key2 | 2
c = c ^ (((k * (k^1)) >> 8) & 255)
c = chr(c)
self._UpdateKeys(c)
return c
class ZipExtFile:
    """File-like object for reading an archive member.
       Is returned by ZipFile.open().
    """

    def __init__(self, fileobj, zipinfo, decrypt=None):
        self.fileobj = fileobj          # underlying archive file object
        self.decrypter = decrypt        # optional _ZipDecrypter instance
        self.bytes_read = 0L            # raw (compressed) bytes consumed
        self.rawbuffer = ''             # not-yet-decompressed bytes
        self.readbuffer = ''            # decompressed bytes not yet returned
        self.linebuffer = ''            # buffer used by readline()
        self.eof = False
        self.univ_newlines = False
        self.nlSeps = ("\n", )
        self.lastdiscard = ''           # last newline sequence stripped

        self.compress_type = zipinfo.compress_type
        self.compress_size = zipinfo.compress_size

        self.closed  = False
        self.mode    = "r"
        self.name = zipinfo.filename

        # read from compressed files in 64k blocks
        self.compreadsize = 64*1024
        if self.compress_type == ZIP_DEFLATED:
            # -15: raw deflate stream, no zlib header/trailer.
            self.dc = zlib.decompressobj(-15)

    def set_univ_newlines(self, univ_newlines):
        # Enable/disable universal-newline translation for readline().
        self.univ_newlines = univ_newlines

        # pick line separator char(s) based on universal newlines flag
        self.nlSeps = ("\n", )
        if self.univ_newlines:
            self.nlSeps = ("\r\n", "\r", "\n")

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol: yield one line per iteration.
        nextline = self.readline()
        if not nextline:
            raise StopIteration()

        return nextline

    def close(self):
        # NOTE(review): only marks the object closed; the underlying file
        # object is not closed here — presumably owned by ZipFile. Confirm.
        self.closed = True

    def _checkfornewline(self):
        # Return (index, separator length) of the first newline in
        # linebuffer, or (-1, -1) when none is present.
        nl, nllen = -1, -1
        if self.linebuffer:
            # ugly check for cases where half of an \r\n pair was
            # read on the last pass, and the \r was discarded.  In this
            # case we just throw away the \n at the start of the buffer.
            if (self.lastdiscard, self.linebuffer[0]) == ('\r','\n'):
                self.linebuffer = self.linebuffer[1:]

            for sep in self.nlSeps:
                nl = self.linebuffer.find(sep)
                if nl >= 0:
                    nllen = len(sep)
                    return nl, nllen

        return nl, nllen

    def readline(self, size = -1):
        """Read a line with approx. size. If size is negative,
           read a whole line.
        """
        if size < 0:
            size = sys.maxint
        elif size == 0:
            return ''

        # check for a newline already in buffer
        nl, nllen = self._checkfornewline()

        if nl >= 0:
            # the next line was already in the buffer
            nl = min(nl, size)
        else:
            # no line break in buffer - try to read more
            size -= len(self.linebuffer)
            while nl < 0 and size > 0:
                buf = self.read(min(size, 100))
                if not buf:
                    break
                self.linebuffer += buf
                size -= len(buf)

                # check for a newline in buffer
                nl, nllen = self._checkfornewline()

            # we either ran out of bytes in the file, or
            # met the specified size limit without finding a newline,
            # so return current buffer
            if nl < 0:
                s = self.linebuffer
                self.linebuffer = ''
                return s

        buf = self.linebuffer[:nl]
        self.lastdiscard = self.linebuffer[nl:nl + nllen]
        self.linebuffer = self.linebuffer[nl + nllen:]

        # line is always returned with \n as newline char (except possibly
        # for a final incomplete line in the file, which is handled above).
        return buf + "\n"

    def readlines(self, sizehint = -1):
        """Return a list with all (following) lines. The sizehint parameter
        is ignored in this implementation.
        """
        result = []
        while True:
            line = self.readline()
            if not line: break
            result.append(line)
        return result

    def read(self, size = None):
        """Read and return up to `size` decompressed bytes (all remaining
        bytes when size is None or negative)."""
        # act like file() obj and return empty string if size is 0
        if size == 0:
            return ''

        # determine read size
        bytesToRead = self.compress_size - self.bytes_read

        # adjust read size for encrypted files since the first 12 bytes
        # are for the encryption/password information
        if self.decrypter is not None:
            bytesToRead -= 12

        if size is not None and size >= 0:
            if self.compress_type == ZIP_STORED:
                # stored data is 1:1, so cap the raw read at what the
                # caller still needs beyond the buffered bytes
                lr = len(self.readbuffer)
                bytesToRead = min(bytesToRead, size - lr)
            elif self.compress_type == ZIP_DEFLATED:
                if len(self.readbuffer) > size:
                    # the user has requested fewer bytes than we've already
                    # pulled through the decompressor; don't read any more
                    bytesToRead = 0
                else:
                    # user will use up the buffer, so read some more
                    lr = len(self.rawbuffer)
                    bytesToRead = min(bytesToRead, self.compreadsize - lr)

        # avoid reading past end of file contents
        if bytesToRead + self.bytes_read > self.compress_size:
            bytesToRead = self.compress_size - self.bytes_read

        # try to read from file (if necessary)
        if bytesToRead > 0:
            bytes = self.fileobj.read(bytesToRead)
            self.bytes_read += len(bytes)
            self.rawbuffer += bytes

            # handle contents of raw buffer
            if self.rawbuffer:
                newdata = self.rawbuffer
                self.rawbuffer = ''

                # decrypt new data if we were given an object to handle that
                if newdata and self.decrypter is not None:
                    newdata = ''.join(map(self.decrypter, newdata))

                # decompress newly read data if necessary
                if newdata and self.compress_type == ZIP_DEFLATED:
                    newdata = self.dc.decompress(newdata)
                    self.rawbuffer = self.dc.unconsumed_tail
                    if self.eof and len(self.rawbuffer) == 0:
                        # we're out of raw bytes (both from the file and
                        # the local buffer); flush just to make sure the
                        # decompressor is done
                        newdata += self.dc.flush()
                        # prevent decompressor from being used again
                        self.dc = None

                self.readbuffer += newdata

        # return what the user asked for
        if size is None or len(self.readbuffer) <= size:
            bytes = self.readbuffer
            self.readbuffer = ''
        else:
            bytes = self.readbuffer[:size]
            self.readbuffer = self.readbuffer[size:]

        return bytes
class ZipFile:
    """ Class with methods to open, read, write, close, list zip files.

    z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)

    file: Either the path to the file, or a file-like object.
          If it is a path, the file will be opened and closed by ZipFile.
    mode: The mode can be either read "r", write "w" or append "a".
    compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
    allowZip64: if True ZipFile will create files with ZIP64 extensions when
                needed, otherwise it will raise an exception when this would
                be necessary.

    """

    fp = None                   # Set here since __del__ checks it

    def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
        """Open the ZIP file with mode read "r", write "w" or append "a"."""
        if mode not in ("r", "w", "a"):
            raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')

        if compression == ZIP_STORED:
            pass
        elif compression == ZIP_DEFLATED:
            if not zlib:
                raise RuntimeError,\
                      "Compression requires the (missing) zlib module"
        else:
            raise RuntimeError, "That compression method is not supported"

        self._allowZip64 = allowZip64
        self._didModify = False
        self.debug = 0  # Level of printing: 0 through 3
        self.NameToInfo = {}    # Find file info given name
        self.filelist = []      # List of ZipInfo instances for archive
        self.compression = compression  # Method of compression
        # Strip any 'b' from the mode; `key` keeps the single-letter mode
        # used to dispatch below.
        self.mode = key = mode.replace('b', '')[0]
        self.pwd = None
        self.comment = ''

        # Check if we were passed a file-like object
        if isinstance(file, basestring):
            self._filePassed = 0
            self.filename = file
            modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
            try:
                self.fp = open(file, modeDict[mode])
            except IOError:
                if mode == 'a':
                    # Appending to a nonexistent file: fall back to create.
                    mode = key = 'w'
                    self.fp = open(file, modeDict[mode])
                else:
                    raise
        else:
            self._filePassed = 1
            self.fp = file
            self.filename = getattr(file, 'name', None)

        if key == 'r':
            self._GetContents()
        elif key == 'w':
            pass
        elif key == 'a':
            try:                        # See if file is a zip file
                self._RealGetContents()
                # seek to start of directory and overwrite
                self.fp.seek(self.start_dir, 0)
            except BadZipfile:          # file is not a zip file, just append
                self.fp.seek(0, 2)
        else:
            if not self._filePassed:
                self.fp.close()
                self.fp = None
            raise RuntimeError, 'Mode must be "r", "w" or "a"'

    def _GetContents(self):
        """Read the directory, making sure we close the file if the format
        is bad."""
        try:
            self._RealGetContents()
        except BadZipfile:
            if not self._filePassed:
                self.fp.close()
                self.fp = None
            raise

    def _RealGetContents(self):
        """Read in the table of contents for the ZIP file."""
        fp = self.fp
        endrec = _EndRecData(fp)
        if not endrec:
            raise BadZipfile, "File is not a zip file"
        if self.debug > 1:
            print endrec
        size_cd = endrec[_ECD_SIZE]             # bytes in central directory
        offset_cd = endrec[_ECD_OFFSET]         # offset of central directory
        self.comment = endrec[_ECD_COMMENT]     # archive comment

        # "concat" is zero, unless zip was concatenated to another file
        concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
        if endrec[_ECD_SIGNATURE] == stringEndArchive64:
            # If Zip64 extension structures are present, account for them
            concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)

        if self.debug > 2:
            inferred = concat + offset_cd
            print "given, inferred, offset", offset_cd, inferred, concat
        # self.start_dir:  Position of start of central directory
        self.start_dir = offset_cd + concat
        fp.seek(self.start_dir, 0)
        # Read the whole central directory into memory and parse it from
        # an in-memory file object.
        data = fp.read(size_cd)
        fp = cStringIO.StringIO(data)
        total = 0
        while total < size_cd:
            centdir = fp.read(sizeCentralDir)
            if centdir[0:4] != stringCentralDir:
                raise BadZipfile, "Bad magic number for central directory"
            centdir = struct.unpack(structCentralDir, centdir)
            if self.debug > 2:
                print centdir
            filename = fp.read(centdir[_CD_FILENAME_LENGTH])
            # Create ZipInfo instance to store file information
            x = ZipInfo(filename)
            x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
            x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
            x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
            (x.create_version, x.create_system, x.extract_version, x.reserved,
                x.flag_bits, x.compress_type, t, d,
                x.CRC, x.compress_size, x.file_size) = centdir[1:12]
            x.volume, x.internal_attr, x.external_attr = centdir[15:18]
            # Convert date/time code to (year, month, day, hour, min, sec)
            x._raw_time = t
            x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
                                     t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )

            x._decodeExtra()
            x.header_offset = x.header_offset + concat
            x.filename = x._decodeFilename()
            self.filelist.append(x)
            self.NameToInfo[x.filename] = x

            # update total bytes read from central directory
            total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
                     + centdir[_CD_EXTRA_FIELD_LENGTH]
                     + centdir[_CD_COMMENT_LENGTH])

            if self.debug > 2:
                print "total", total

    def namelist(self):
        """Return a list of file names in the archive."""
        l = []
        for data in self.filelist:
            l.append(data.filename)
        return l

    def infolist(self):
        """Return a list of class ZipInfo instances for files in the
        archive."""
        return self.filelist

    def printdir(self):
        """Print a table of contents for the zip file."""
        print "%-46s %19s %12s" % ("File Name", "Modified    ", "Size")
        for zinfo in self.filelist:
            date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
            print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size)

    def testzip(self):
        """Read all the files and check the CRC.

        Returns the name of the first member whose CRC check fails, or
        None if all members are intact."""
        chunk_size = 2 ** 20
        for zinfo in self.filelist:
            try:
                # Read by chunks, to avoid an OverflowError or a
                # MemoryError with very large embedded files.
                f = self.open(zinfo.filename, "r")
                while f.read(chunk_size):     # Check CRC-32
                    pass
            except BadZipfile:
                return zinfo.filename

    def getinfo(self, name):
        """Return the instance of ZipInfo given 'name'."""
        info = self.NameToInfo.get(name)
        if info is None:
            raise KeyError(
                'There is no item named %r in the archive' % name)

        return info

    def setpassword(self, pwd):
        """Set default password for encrypted files."""
        self.pwd = pwd

    def read(self, name, pwd=None):
        """Return file bytes (as a string) for name."""
        return self.open(name, "r", pwd).read()

    def open(self, name, mode="r", pwd=None):
        """Return file-like object for 'name'.

        `name` may be a member name or a ZipInfo object.  `pwd` overrides
        the default password for encrypted members.  Returns a ZipExtFile."""
        if mode not in ("r", "U", "rU"):
            raise RuntimeError, 'open() requires mode "r", "U", or "rU"'
        if not self.fp:
            raise RuntimeError, \
                  "Attempt to read ZIP archive that was already closed"

        # Only open a new file for instances where we were not
        # given a file object in the constructor
        if self._filePassed:
            zef_file = self.fp
        else:
            zef_file = open(self.filename, 'rb')

        # Make sure we have an info object
        if isinstance(name, ZipInfo):
            # 'name' is already an info object
            zinfo = name
        else:
            # Get info object for name
            zinfo = self.getinfo(name)

        zef_file.seek(zinfo.header_offset, 0)

        # Skip the file header:
        fheader = zef_file.read(sizeFileHeader)
        if fheader[0:4] != stringFileHeader:
            raise BadZipfile, "Bad magic number for file header"

        fheader = struct.unpack(structFileHeader, fheader)
        fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
        if fheader[_FH_EXTRA_FIELD_LENGTH]:
            zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])

        # Consistency check between the central directory and the local
        # header guards against corrupted or tampered archives.
        if fname != zinfo.orig_filename:
            raise BadZipfile, \
                      'File name in directory "%s" and header "%s" differ.' % (
                          zinfo.orig_filename, fname)

        # check for encrypted flag & handle password
        is_encrypted = zinfo.flag_bits & 0x1
        zd = None
        if is_encrypted:
            if not pwd:
                pwd = self.pwd
            if not pwd:
                raise RuntimeError, "File %s is encrypted, " \
                      "password required for extraction" % name

            zd = _ZipDecrypter(pwd)
            # The first 12 bytes in the cypher stream is an encryption header
            #  used to strengthen the algorithm. The first 11 bytes are
            #  completely random, while the 12th contains the MSB of the CRC,
            #  or the MSB of the file time depending on the header type
            #  and is used to check the correctness of the password.
            bytes = zef_file.read(12)
            h = map(zd, bytes[0:12])
            if zinfo.flag_bits & 0x8:
                # compare against the file type from extended local headers
                check_byte = (zinfo._raw_time >> 8) & 0xff
            else:
                # compare against the CRC otherwise
                check_byte = (zinfo.CRC >> 24) & 0xff
            if ord(h[11]) != check_byte:
                raise RuntimeError("Bad password for file", name)

        # build and return a ZipExtFile
        if zd is None:
            zef = ZipExtFile(zef_file, zinfo)
        else:
            zef = ZipExtFile(zef_file, zinfo, zd)

        # set universal newlines on ZipExtFile if necessary
        if "U" in mode:
            zef.set_univ_newlines(True)
        return zef

    def extract(self, member, path=None, pwd=None):
        """Extract a member from the archive to the current working directory,
           using its full name. Its file information is extracted as accurately
           as possible. `member' may be a filename or a ZipInfo object. You can
           specify a different directory using `path'.
        """
        if not isinstance(member, ZipInfo):
            member = self.getinfo(member)

        if path is None:
            path = os.getcwd()

        return self._extract_member(member, path, pwd)

    def extractall(self, path=None, members=None, pwd=None):
        """Extract all members from the archive to the current working
           directory. `path' specifies a different directory to extract to.
           `members' is optional and must be a subset of the list returned
           by namelist().
        """
        if members is None:
            members = self.namelist()

        for zipinfo in members:
            self.extract(zipinfo, path, pwd)

    def _extract_member(self, member, targetpath, pwd):
        """Extract the ZipInfo object 'member' to a physical
           file on the path targetpath.
        """
        # build the destination pathname, replacing
        # forward slashes to platform specific separators.
        if targetpath[-1:] in (os.path.sep, os.path.altsep):
            targetpath = targetpath[:-1]

        # don't include leading "/" from file name if present
        if member.filename[0] == '/':
            targetpath = os.path.join(targetpath, member.filename[1:])
        else:
            targetpath = os.path.join(targetpath, member.filename)

        targetpath = os.path.normpath(targetpath)

        # Create all upper directories if necessary.
        upperdirs = os.path.dirname(targetpath)
        if upperdirs and not os.path.exists(upperdirs):
            os.makedirs(upperdirs)

        if member.filename[-1] == '/':
            # Member is a directory entry: just create it.
            # NOTE(review): os.mkdir raises if the directory already
            # exists — presumably callers never extract the same
            # directory twice; confirm before relying on this.
            os.mkdir(targetpath)
            return targetpath

        source = self.open(member, pwd=pwd)
        target = file(targetpath, "wb")
        shutil.copyfileobj(source, target)
        source.close()
        target.close()

        return targetpath

    def _writecheck(self, zinfo):
        """Check for errors before writing a file to the archive."""
        if zinfo.filename in self.NameToInfo:
            if self.debug:      # Warning for duplicate names
                print "Duplicate name:", zinfo.filename
        if self.mode not in ("w", "a"):
            raise RuntimeError, 'write() requires mode "w" or "a"'
        if not self.fp:
            raise RuntimeError, \
                  "Attempt to write ZIP archive that was already closed"
        if zinfo.compress_type == ZIP_DEFLATED and not zlib:
            raise RuntimeError, \
                  "Compression requires the (missing) zlib module"
        if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
            raise RuntimeError, \
                  "That compression method is not supported"
        if zinfo.file_size > ZIP64_LIMIT:
            if not self._allowZip64:
                raise LargeZipFile("Filesize would require ZIP64 extensions")
        if zinfo.header_offset > ZIP64_LIMIT:
            if not self._allowZip64:
                raise LargeZipFile("Zipfile size would require ZIP64 extensions")

    def write(self, filename, arcname=None, compress_type=None):
        """Put the bytes from filename into the archive under the name
        arcname.

        arcname defaults to filename with the drive letter and any leading
        path separators removed; compress_type overrides the archive's
        default compression method for this one member."""
        if not self.fp:
            raise RuntimeError(
                  "Attempt to write to ZIP archive that was already closed")

        st = os.stat(filename)
        isdir = stat.S_ISDIR(st.st_mode)
        mtime = time.localtime(st.st_mtime)
        date_time = mtime[0:6]
        # Create ZipInfo instance to store file information
        if arcname is None:
            arcname = filename
        arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
        while arcname[0] in (os.sep, os.altsep):
            arcname = arcname[1:]
        if isdir:
            arcname += '/'
        zinfo = ZipInfo(arcname, date_time)
        zinfo.external_attr = (st[0] & 0xFFFF) << 16L      # Unix attributes
        if compress_type is None:
            zinfo.compress_type = self.compression
        else:
            zinfo.compress_type = compress_type

        zinfo.file_size = st.st_size
        zinfo.flag_bits = 0x00
        zinfo.header_offset = self.fp.tell()    # Start of header bytes

        self._writecheck(zinfo)
        self._didModify = True

        if isdir:
            # Directories are stored as zero-length entries whose name
            # ends with '/'; there is no file data to write.
            zinfo.file_size = 0
            zinfo.compress_size = 0
            zinfo.CRC = 0
            self.filelist.append(zinfo)
            self.NameToInfo[zinfo.filename] = zinfo
            self.fp.write(zinfo.FileHeader())
            return

        fp = open(filename, "rb")
        # Must overwrite CRC and sizes with correct data later
        zinfo.CRC = CRC = 0
        zinfo.compress_size = compress_size = 0
        zinfo.file_size = file_size = 0
        self.fp.write(zinfo.FileHeader())
        if zinfo.compress_type == ZIP_DEFLATED:
            cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                 zlib.DEFLATED, -15)
        else:
            cmpr = None
        # Stream the file through in 8K chunks, computing the CRC and
        # sizes as we go.
        while 1:
            buf = fp.read(1024 * 8)
            if not buf:
                break
            file_size = file_size + len(buf)
            CRC = crc32(buf, CRC) & 0xffffffff
            if cmpr:
                buf = cmpr.compress(buf)
                compress_size = compress_size + len(buf)
            self.fp.write(buf)
        fp.close()
        if cmpr:
            buf = cmpr.flush()
            compress_size = compress_size + len(buf)
            self.fp.write(buf)
            zinfo.compress_size = compress_size
        else:
            zinfo.compress_size = file_size
        zinfo.CRC = CRC
        zinfo.file_size = file_size
        # Seek backwards and write CRC and file sizes
        position = self.fp.tell()       # Preserve current position in file
        self.fp.seek(zinfo.header_offset + 14, 0)
        self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
              zinfo.file_size))
        self.fp.seek(position, 0)
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo

    def writestr(self, zinfo_or_arcname, bytes):
        """Write a file into the archive.  The contents is the string
        'bytes'.  'zinfo_or_arcname' is either a ZipInfo instance or
        the name of the file in the archive."""
        if not isinstance(zinfo_or_arcname, ZipInfo):
            zinfo = ZipInfo(filename=zinfo_or_arcname,
                            date_time=time.localtime(time.time())[:6])

            zinfo.compress_type = self.compression
            # Default permissions 0600 packed into the high 16 bits.
            zinfo.external_attr = 0600 << 16
        else:
            zinfo = zinfo_or_arcname

        if not self.fp:
            raise RuntimeError(
                  "Attempt to write to ZIP archive that was already closed")

        zinfo.file_size = len(bytes)            # Uncompressed size
        # NOTE(review): header_offset is assigned again a few lines below;
        # this first assignment appears redundant.
        zinfo.header_offset = self.fp.tell()    # Start of header bytes
        self._writecheck(zinfo)
        self._didModify = True
        zinfo.CRC = crc32(bytes) & 0xffffffff       # CRC-32 checksum
        if zinfo.compress_type == ZIP_DEFLATED:
            co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                 zlib.DEFLATED, -15)
            bytes = co.compress(bytes) + co.flush()
            zinfo.compress_size = len(bytes)    # Compressed size
        else:
            zinfo.compress_size = zinfo.file_size
        zinfo.header_offset = self.fp.tell()    # Start of header bytes
        self.fp.write(zinfo.FileHeader())
        self.fp.write(bytes)
        self.fp.flush()
        if zinfo.flag_bits & 0x08:
            # Write CRC and file sizes after the file data
            self.fp.write(struct.pack("<lLL", zinfo.CRC, zinfo.compress_size,
                  zinfo.file_size))
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo

    def __del__(self):
        """Call the "close()" method in case the user forgot."""
        self.close()

    def close(self):
        """Close the file, and for mode "w" and "a" write the ending
        records."""
        if self.fp is None:
            return

        if self.mode in ("w", "a") and self._didModify: # write ending records
            count = 0
            pos1 = self.fp.tell()
            for zinfo in self.filelist:         # write central directory
                count = count + 1
                dt = zinfo.date_time
                dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
                dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
                extra = []
                if zinfo.file_size > ZIP64_LIMIT \
                        or zinfo.compress_size > ZIP64_LIMIT:
                    # Oversized fields go into a ZIP64 extra record;
                    # the fixed fields carry the 0xffffffff sentinel.
                    extra.append(zinfo.file_size)
                    extra.append(zinfo.compress_size)
                    file_size = 0xffffffff
                    compress_size = 0xffffffff
                else:
                    file_size = zinfo.file_size
                    compress_size = zinfo.compress_size

                if zinfo.header_offset > ZIP64_LIMIT:
                    extra.append(zinfo.header_offset)
                    header_offset = 0xffffffffL
                else:
                    header_offset = zinfo.header_offset

                extra_data = zinfo.extra
                if extra:
                    # Append a ZIP64 field to the extra's
                    extra_data = struct.pack(
                            '<HH' + 'Q'*len(extra),
                            1, 8*len(extra), *extra) + extra_data

                    extract_version = max(45, zinfo.extract_version)
                    create_version = max(45, zinfo.create_version)
                else:
                    extract_version = zinfo.extract_version
                    create_version = zinfo.create_version

                try:
                    filename, flag_bits = zinfo._encodeFilenameFlags()
                    centdir = struct.pack(structCentralDir,
                     stringCentralDir, create_version,
                     zinfo.create_system, extract_version, zinfo.reserved,
                     flag_bits, zinfo.compress_type, dostime, dosdate,
                     zinfo.CRC, compress_size, file_size,
                     len(filename), len(extra_data), len(zinfo.comment),
                     0, zinfo.internal_attr, zinfo.external_attr,
                     header_offset)
                except DeprecationWarning:
                    # A field overflowed its struct format; dump the raw
                    # values for debugging before re-raising.
                    print >>sys.stderr, (structCentralDir,
                     stringCentralDir, create_version,
                     zinfo.create_system, extract_version, zinfo.reserved,
                     zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
                     zinfo.CRC, compress_size, file_size,
                     len(zinfo.filename), len(extra_data), len(zinfo.comment),
                     0, zinfo.internal_attr, zinfo.external_attr,
                     header_offset)
                    raise
                self.fp.write(centdir)
                self.fp.write(filename)
                self.fp.write(extra_data)
                self.fp.write(zinfo.comment)

            pos2 = self.fp.tell()
            # Write end-of-zip-archive record
            centDirCount = count
            centDirSize = pos2 - pos1
            centDirOffset = pos1
            if (centDirCount >= ZIP_FILECOUNT_LIMIT or
                centDirOffset > ZIP64_LIMIT or
                centDirSize > ZIP64_LIMIT):
                # Need to write the ZIP64 end-of-archive records
                zip64endrec = struct.pack(
                        structEndArchive64, stringEndArchive64,
                        44, 45, 45, 0, 0, centDirCount, centDirCount,
                        centDirSize, centDirOffset)
                self.fp.write(zip64endrec)

                zip64locrec = struct.pack(
                        structEndArchive64Locator,
                        stringEndArchive64Locator, 0, pos2, 1)
                self.fp.write(zip64locrec)
                centDirCount = min(centDirCount, 0xFFFF)
                centDirSize = min(centDirSize, 0xFFFFFFFF)
                centDirOffset = min(centDirOffset, 0xFFFFFFFF)

            # check for valid comment length
            if len(self.comment) >= ZIP_MAX_COMMENT:
                # NOTE(review): `msg` is built but never printed/logged —
                # the debug message is effectively dead code.
                if self.debug > 0:
                    msg = 'Archive comment is too long; truncating to %d bytes' \
                          % ZIP_MAX_COMMENT
                self.comment = self.comment[:ZIP_MAX_COMMENT]

            endrec = struct.pack(structEndArchive, stringEndArchive,
                                 0, 0, centDirCount, centDirCount,
                                 centDirSize, centDirOffset, len(self.comment))
            self.fp.write(endrec)
            self.fp.write(self.comment)
            self.fp.flush()

        if not self._filePassed:
            self.fp.close()
        self.fp = None
class PyZipFile(ZipFile):
"""Class to create ZIP archives with Python library files and packages."""
def writepy(self, pathname, basename = ""):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, listdir *.py and enter all modules. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyo or module.pyc.
This method will compile the module.py into module.pyc if
necessary.
"""
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print "Adding package in", pathname, "as", basename
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print "Adding", arcname
self.write(fname, arcname)
dirlist = os.listdir(pathname)
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename) # Recursive call
elif ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print "Adding", arcname
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print "Adding files from directory", pathname
for filename in os.listdir(pathname):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print "Adding", arcname
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError, \
'Files added with writepy() must end with ".py"'
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print "Adding file", arcname
self.write(fname, arcname)
    def _get_codename(self, pathname, basename):
        """Return (filename, archivename) for the path.
        Given a module name path, return the correct file path and
        archive name, compiling if necessary. For example, given
        /python/lib/string, return (/python/lib/string.pyc, string).
        """
        # Candidate on-disk artefacts for the module (Python 2 layout).
        file_py = pathname + ".py"
        file_pyc = pathname + ".pyc"
        file_pyo = pathname + ".pyo"
        # Prefer an up-to-date optimized (.pyo) file over .pyc/.py.
        # NOTE(review): assumes the .py source exists -- os.stat(file_py)
        # raises OSError here if only compiled artefacts are present; confirm
        # callers only pass paths derived from existing .py files.
        if os.path.isfile(file_pyo) and \
            os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime:
            fname = file_pyo # Use .pyo file
        elif not os.path.isfile(file_pyc) or \
             os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime:
            # .pyc is missing or stale: (re)compile the source.  The final
            # True argument (doraise) makes py_compile raise PyCompileError
            # instead of printing errors to stderr.
            import py_compile
            if self.debug:
                print "Compiling", file_py
            try:
                py_compile.compile(file_py, file_pyc, None, True)
            except py_compile.PyCompileError,err:
                # Report the compile failure; fname still points at the
                # (possibly absent/stale) .pyc, so a later write() may fail.
                print err.msg
            fname = file_pyc
        else:
            # Existing .pyc is at least as new as the source: reuse it.
            fname = file_pyc
        # Archive member name is the file's basename, optionally prefixed
        # with the package path ("pkg/sub/module.pyc").
        archivename = os.path.split(fname)[1]
        if basename:
            archivename = "%s/%s" % (basename, archivename)
        return (fname, archivename)
def main(args = None):
    """Command-line interface: list (-l), test (-t), extract (-e) or
    create (-c) a zip archive.  Exits with status 1 on usage errors."""
    import textwrap
    USAGE=textwrap.dedent("""\
        Usage:
            zipfile.py -l zipfile.zip        # Show listing of a zipfile
            zipfile.py -t zipfile.zip        # Test if a zipfile is valid
            zipfile.py -e zipfile.zip target # Extract zipfile into target dir
            zipfile.py -c zipfile.zip src ... # Create zipfile from sources
        """)
    # Default to the process arguments when none were passed explicitly.
    if args is None:
        args = sys.argv[1:]
    if not args or args[0] not in ('-l', '-c', '-e', '-t'):
        print USAGE
        sys.exit(1)
    if args[0] == '-l':
        # Listing: exactly one archive argument expected.
        if len(args) != 2:
            print USAGE
            sys.exit(1)
        zf = ZipFile(args[1], 'r')
        zf.printdir()
        zf.close()
    elif args[0] == '-t':
        # Integrity test: testzip() checks CRCs of all members.
        if len(args) != 2:
            print USAGE
            sys.exit(1)
        zf = ZipFile(args[1], 'r')
        zf.testzip()
        print "Done testing"
    elif args[0] == '-e':
        # Extraction into a target directory.
        if len(args) != 3:
            print USAGE
            sys.exit(1)
        zf = ZipFile(args[1], 'r')
        out = args[2]
        for path in zf.namelist():
            # Strip a leading "./" so members land directly under `out`.
            if path.startswith('./'):
                tgt = os.path.join(out, path[2:])
            else:
                tgt = os.path.join(out, path)
            # NOTE(review): member names are joined without sanitization;
            # absolute or "../" names could escape `out` -- confirm inputs
            # are trusted archives before reusing this path.
            tgtdir = os.path.dirname(tgt)
            if not os.path.exists(tgtdir):
                os.makedirs(tgtdir)
            fp = open(tgt, 'wb')
            fp.write(zf.read(path))
            fp.close()
        zf.close()
    elif args[0] == '-c':
        # Creation: remaining arguments are files/directories to add.
        if len(args) < 3:
            print USAGE
            sys.exit(1)
        def addToZip(zf, path, zippath):
            # Recursively add files; directories are walked, other
            # filesystem objects (symlinks, devices) are skipped.
            if os.path.isfile(path):
                zf.write(path, zippath, ZIP_DEFLATED)
            elif os.path.isdir(path):
                for nm in os.listdir(path):
                    addToZip(zf,
                            os.path.join(path, nm), os.path.join(zippath, nm))
            # else: ignore
        zf = ZipFile(args[1], 'w', allowZip64=True)
        for src in args[2:]:
            addToZip(zf, src, os.path.basename(src))
        zf.close()
# Allow running this module directly as a small zip command-line utility.
if __name__ == "__main__":
    main()
| apache-2.0 |
SakuradaJun/django-allauth | allauth/account/decorators.py | 47 | 1623 | from django.contrib.auth.decorators import login_required
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.shortcuts import render
from .models import EmailAddress
from .utils import send_email_confirmation
def verified_email_required(function=None,
                            login_url=None,
                            redirect_field_name=REDIRECT_FIELD_NAME):
    """
    Even when email verification is not mandatory during signup, there
    may be circumstances during which you really want to prevent
    unverified users from proceeding. This decorator ensures the user is
    authenticated and has a verified email address. If the former is
    not the case then the behavior is identical to that of the
    standard `login_required` decorator. If the latter does not hold,
    email verification mails are automatically resent and the user is
    presented with a page informing them they need to verify their email
    address.

    Usable both as a bare decorator (``@verified_email_required``) and
    with arguments (``@verified_email_required(login_url=...)``).
    """
    # Local import keeps this file's top-level imports untouched.
    import functools

    def decorator(view_func):
        @login_required(redirect_field_name=redirect_field_name,
                        login_url=login_url)
        # BUGFIX: preserve the wrapped view's __name__/__doc__/__module__;
        # without wraps() the decorated view shows up as "_wrapped_view"
        # in tracebacks, debug pages and URL introspection.
        @functools.wraps(view_func)
        def _wrapped_view(request, *args, **kwargs):
            # No verified address on file: (re)send a confirmation mail
            # and short-circuit to an explanatory page.
            if not EmailAddress.objects.filter(user=request.user,
                                               verified=True).exists():
                send_email_confirmation(request, request.user)
                return render(request,
                              'account/verified_email_required.html')
            return view_func(request, *args, **kwargs)
        return _wrapped_view
    if function:
        # Called as a bare decorator: `function` is the view itself.
        return decorator(function)
    return decorator
| mit |
vialectrum/vialectrum | electrum_ltc/lnwatcher.py | 1 | 17335 | # Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
from typing import NamedTuple, Iterable, TYPE_CHECKING
import os
import asyncio
from enum import IntEnum, auto
from typing import NamedTuple, Dict
from . import util
from .sql_db import SqlDB, sql
from .wallet_db import WalletDB
from .util import bh2u, bfh, log_exceptions, ignore_exceptions, TxMinedInfo
from .address_synchronizer import AddressSynchronizer, TX_HEIGHT_LOCAL, TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED
from .transaction import Transaction
if TYPE_CHECKING:
from .network import Network
from .lnsweep import SweepInfo
from .lnworker import LNWallet
class ListenerItem(NamedTuple):
    """Synchronization handles stored per funding outpoint (used by tests)."""
    # this is triggered when the lnwatcher is all done with the outpoint used as index in LNWatcher.tx_progress
    all_done : asyncio.Event
    # txs we broadcast are put on this queue so that the test can wait for them to get mined
    tx_queue : asyncio.Queue
class TxMinedDepth(IntEnum):
    """ IntEnum because we call min() in get_deepest_tx_mined_depth_for_txids """
    # Ordering matters: auto() assigns increasing values, so min() selects
    # the most deeply mined depth (DEEP < SHALLOW < MEMPOOL < FREE).
    DEEP = auto()
    SHALLOW = auto()
    MEMPOOL = auto()
    FREE = auto()
# DDL for the watchtower database (used by SweepStore.create_database).
# sweep_txs holds pre-signed sweep transactions keyed by the channel
# funding outpoint; ctn is the commitment transaction number the row
# belongs to, prevout the output the stored tx spends.
create_sweep_txs="""
CREATE TABLE IF NOT EXISTS sweep_txs (
funding_outpoint VARCHAR(34) NOT NULL,
ctn INTEGER NOT NULL,
prevout VARCHAR(34),
tx VARCHAR
)"""
# channel_info maps a channel funding outpoint to the on-chain address
# that the watcher monitors for spends.
create_channel_info="""
CREATE TABLE IF NOT EXISTS channel_info (
outpoint VARCHAR(34) NOT NULL,
address VARCHAR(32),
PRIMARY KEY(outpoint)
)"""
class SweepStore(SqlDB):
    """SQLite-backed store of watched channels and their pre-signed sweep
    transactions.  Methods decorated with @sql are marshalled onto the
    SqlDB thread and return their result to the calling coroutine."""
    def __init__(self, path, network):
        super().__init__(network.asyncio_loop, path)
    def create_database(self):
        # Called by SqlDB when the database file does not exist yet.
        c = self.conn.cursor()
        c.execute(create_channel_info)
        c.execute(create_sweep_txs)
        self.conn.commit()
    @sql
    def get_sweep_tx(self, funding_outpoint, prevout):
        # All stored sweep txs spending `prevout` for this channel; the tx
        # column holds raw bytes (see add_sweep_tx), hence bh2u() here.
        c = self.conn.cursor()
        c.execute("SELECT tx FROM sweep_txs WHERE funding_outpoint=? AND prevout=?", (funding_outpoint, prevout))
        return [Transaction(bh2u(r[0])) for r in c.fetchall()]
    @sql
    def list_sweep_tx(self):
        # Set of funding outpoints that have at least one sweep tx stored.
        c = self.conn.cursor()
        c.execute("SELECT funding_outpoint FROM sweep_txs")
        return set([r[0] for r in c.fetchall()])
    @sql
    def add_sweep_tx(self, funding_outpoint, ctn, prevout, raw_tx):
        c = self.conn.cursor()
        # Only fully signed transactions may be stored.
        assert Transaction(raw_tx).is_complete()
        c.execute("""INSERT INTO sweep_txs (funding_outpoint, ctn, prevout, tx) VALUES (?,?,?,?)""", (funding_outpoint, ctn, prevout, bfh(raw_tx)))
        self.conn.commit()
    @sql
    def get_num_tx(self, funding_outpoint):
        # Number of sweep txs stored for the given channel.
        c = self.conn.cursor()
        c.execute("SELECT count(*) FROM sweep_txs WHERE funding_outpoint=?", (funding_outpoint,))
        return int(c.fetchone()[0])
    @sql
    def get_ctn(self, outpoint, addr):
        # Highest commitment number seen for this channel (0 if none);
        # registers the channel on first use.
        if not self._has_channel(outpoint):
            self._add_channel(outpoint, addr)
        c = self.conn.cursor()
        c.execute("SELECT max(ctn) FROM sweep_txs WHERE funding_outpoint=?", (outpoint,))
        return int(c.fetchone()[0] or 0)
    @sql
    def remove_sweep_tx(self, funding_outpoint):
        c = self.conn.cursor()
        c.execute("DELETE FROM sweep_txs WHERE funding_outpoint=?", (funding_outpoint,))
        self.conn.commit()
    def _add_channel(self, outpoint, address):
        # Not @sql-decorated: only called from within @sql methods, so it
        # already runs on the SqlDB thread.
        c = self.conn.cursor()
        c.execute("INSERT INTO channel_info (address, outpoint) VALUES (?,?)", (address, outpoint))
        self.conn.commit()
    @sql
    def remove_channel(self, outpoint):
        c = self.conn.cursor()
        c.execute("DELETE FROM channel_info WHERE outpoint=?", (outpoint,))
        self.conn.commit()
    def _has_channel(self, outpoint):
        # Same thread-context note as _add_channel.
        c = self.conn.cursor()
        c.execute("SELECT * FROM channel_info WHERE outpoint=?", (outpoint,))
        r = c.fetchone()
        return r is not None
    @sql
    def get_address(self, outpoint):
        # Watched address for a channel, or None if unknown.
        c = self.conn.cursor()
        c.execute("SELECT address FROM channel_info WHERE outpoint=?", (outpoint,))
        r = c.fetchone()
        return r[0] if r else None
    @sql
    def list_channels(self):
        # All (outpoint, address) pairs currently registered.
        c = self.conn.cursor()
        c.execute("SELECT outpoint, address FROM channel_info")
        return [(r[0], r[1]) for r in c.fetchall()]
class LNWatcher(AddressSynchronizer):
    """Base class that tracks channel funding outpoints on-chain.

    Subclasses implement do_breach_remedy() and update_channel_state();
    this class handles address registration, network callbacks and the
    classification of how deeply spends are mined."""
    LOGGING_SHORTCUT = 'W'
    def __init__(self, network: 'Network'):
        # In-memory wallet db: the watcher's address history is not persisted.
        AddressSynchronizer.__init__(self, WalletDB({}, manual_upgrades=False))
        self.config = network.config
        self.channels = {}  # maps watched address -> funding outpoint
        self.network = network
        util.register_callback(
            self.on_network_update,
            ['network_updated', 'blockchain_updated', 'verified', 'wallet_updated', 'fee'])
        # status gets populated when we run
        self.channel_status = {}
    def get_channel_status(self, outpoint):
        return self.channel_status.get(outpoint, 'unknown')
    def add_channel(self, outpoint: str, address: str) -> None:
        """Start watching `address` for spends of channel `outpoint`."""
        assert isinstance(outpoint, str)
        assert isinstance(address, str)
        self.add_address(address)
        self.channels[address] = outpoint
    async def unwatch_channel(self, address, funding_outpoint):
        self.logger.info(f'unwatching {funding_outpoint}')
        self.channels.pop(address, None)
    @log_exceptions
    async def on_network_update(self, event, *args):
        # 'verified'/'wallet_updated' callbacks fire for every wallet;
        # only react to the ones addressed to this watcher instance.
        if event in ('verified', 'wallet_updated'):
            if args[0] != self:
                return
        if not self.synchronizer:
            self.logger.info("synchronizer not set yet")
            return
        channels_items = list(self.channels.items()) # copy
        for address, outpoint in channels_items:
            await self.check_onchain_situation(address, outpoint)
    async def check_onchain_situation(self, address, funding_outpoint):
        """Inspect the chain around one channel and notify the subclass."""
        # early return if address has not been added yet
        if not self.is_mine(address):
            return
        spenders = self.inspect_tx_candidate(funding_outpoint, 0)
        # inspect_tx_candidate might have added new addresses, in which case we return early
        if not self.is_up_to_date():
            return
        funding_txid = funding_outpoint.split(':')[0]
        funding_height = self.get_tx_height(funding_txid)
        closing_txid = spenders.get(funding_outpoint)
        closing_height = self.get_tx_height(closing_txid)
        if closing_txid:
            closing_tx = self.db.get_transaction(closing_txid)
            if closing_tx:
                keep_watching = await self.do_breach_remedy(funding_outpoint, closing_tx, spenders)
            else:
                # We know the spending txid but have not fetched the tx yet.
                self.logger.info(f"channel {funding_outpoint} closed by {closing_txid}. still waiting for tx itself...")
                keep_watching = True
        else:
            keep_watching = True
        await self.update_channel_state(
            funding_outpoint=funding_outpoint,
            funding_txid=funding_txid,
            funding_height=funding_height,
            closing_txid=closing_txid,
            closing_height=closing_height,
            keep_watching=keep_watching)
        if not keep_watching:
            await self.unwatch_channel(address, funding_outpoint)
    async def do_breach_remedy(self, funding_outpoint, closing_tx, spenders) -> bool:
        raise NotImplementedError()  # implemented by subclasses
    async def update_channel_state(self, *, funding_outpoint: str, funding_txid: str,
                                   funding_height: TxMinedInfo, closing_txid: str,
                                   closing_height: TxMinedInfo, keep_watching: bool) -> None:
        raise NotImplementedError()  # implemented by subclasses
    def inspect_tx_candidate(self, outpoint, n):
        """Follow the spend chain from `outpoint` up to two levels deep.

        Returns a dict mapping each encountered outpoint to the txid that
        spends it (or None).  Also updates self.channel_status and
        registers any newly seen output addresses for watching."""
        prev_txid, index = outpoint.split(':')
        txid = self.db.get_spent_outpoint(prev_txid, int(index))
        result = {outpoint:txid}
        if txid is None:
            self.channel_status[outpoint] = 'open'
            return result
        # n == 0 means `outpoint` is the funding outpoint itself.
        if n == 0 and not self.is_deeply_mined(txid):
            self.channel_status[outpoint] = 'closed (%d)' % self.get_tx_height(txid).conf
        else:
            self.channel_status[outpoint] = 'closed (deep)'
        tx = self.db.get_transaction(txid)
        for i, o in enumerate(tx.outputs()):
            if not self.is_mine(o.address):
                # Newly discovered output address: watch it from now on.
                # NOTE(review): its spends are only inspected on a later
                # pass (see is_up_to_date check in check_onchain_situation).
                self.add_address(o.address)
            elif n < 2:
                r = self.inspect_tx_candidate(txid+':%d'%i, n+1)
                result.update(r)
        return result
    def get_tx_mined_depth(self, txid: str):
        """Classify how deeply `txid` is mined (see TxMinedDepth)."""
        if not txid:
            return TxMinedDepth.FREE
        tx_mined_depth = self.get_tx_height(txid)
        height, conf = tx_mined_depth.height, tx_mined_depth.conf
        if conf > 100:
            return TxMinedDepth.DEEP
        elif conf > 0:
            return TxMinedDepth.SHALLOW
        elif height in (TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT):
            return TxMinedDepth.MEMPOOL
        elif height == TX_HEIGHT_LOCAL:
            return TxMinedDepth.FREE
        elif height > 0 and conf == 0:
            # unverified but claimed to be mined
            return TxMinedDepth.MEMPOOL
        else:
            raise NotImplementedError()
    def is_deeply_mined(self, txid):
        return self.get_tx_mined_depth(txid) == TxMinedDepth.DEEP
class WatchTower(LNWatcher):
    """Standalone watchtower: watches channels registered in a SweepStore
    and broadcasts the stored pre-signed sweep transactions when the
    corresponding outputs appear on-chain."""
    LOGGING_SHORTCUT = 'W'
    def __init__(self, network):
        LNWatcher.__init__(self, network)
        self.network = network
        self.sweepstore = SweepStore(os.path.join(self.network.config.path, "watchtower_db"), network)
        # this maps funding_outpoints to ListenerItems, which have an event for when the watcher is done,
        # and a queue for seeing which txs are being published
        self.tx_progress = {} # type: Dict[str, ListenerItem]
    async def start_watching(self):
        # I need to watch the addresses from sweepstore
        l = await self.sweepstore.list_channels()
        for outpoint, address in l:
            self.add_channel(outpoint, address)
    async def do_breach_remedy(self, funding_outpoint, closing_tx, spenders):
        """Broadcast stored sweep txs for unspent prevouts; return whether
        the channel still needs watching."""
        keep_watching = False
        for prevout, spender in spenders.items():
            if spender is not None:
                # Already spent: keep watching until the spend is deep.
                keep_watching |= not self.is_deeply_mined(spender)
                continue
            sweep_txns = await self.sweepstore.get_sweep_tx(funding_outpoint, prevout)
            for tx in sweep_txns:
                await self.broadcast_or_log(funding_outpoint, tx)
                keep_watching = True
        return keep_watching
    async def broadcast_or_log(self, funding_outpoint: str, tx: Transaction):
        """Broadcast `tx` unless it is already known on the network side;
        failures are logged, not raised.  Returns the txid on success."""
        height = self.get_tx_height(tx.txid()).height
        if height != TX_HEIGHT_LOCAL:
            # Already seen in mempool/chain: nothing to do.
            return
        try:
            txid = await self.network.broadcast_transaction(tx)
        except Exception as e:
            self.logger.info(f'broadcast failure: txid={tx.txid()}, funding_outpoint={funding_outpoint}: {repr(e)}')
        else:
            self.logger.info(f'broadcast success: txid={tx.txid()}, funding_outpoint={funding_outpoint}')
            if funding_outpoint in self.tx_progress:
                # Let test listeners observe the published tx.
                await self.tx_progress[funding_outpoint].tx_queue.put(tx)
            return txid
    # The following helpers bridge sync callers to the async SweepStore API.
    def get_ctn(self, outpoint, addr):
        async def f():
            return await self.sweepstore.get_ctn(outpoint, addr)
        return self.network.run_from_another_thread(f())
    def get_num_tx(self, outpoint):
        async def f():
            return await self.sweepstore.get_num_tx(outpoint)
        return self.network.run_from_another_thread(f())
    def list_sweep_tx(self):
        async def f():
            return await self.sweepstore.list_sweep_tx()
        return self.network.run_from_another_thread(f())
    def list_channels(self):
        async def f():
            return await self.sweepstore.list_channels()
        return self.network.run_from_another_thread(f())
    async def unwatch_channel(self, address, funding_outpoint):
        # Also purge the channel's persisted state and wake up listeners.
        await super().unwatch_channel(address, funding_outpoint)
        await self.sweepstore.remove_sweep_tx(funding_outpoint)
        await self.sweepstore.remove_channel(funding_outpoint)
        if funding_outpoint in self.tx_progress:
            self.tx_progress[funding_outpoint].all_done.set()
    async def update_channel_state(self, *args, **kwargs):
        # The tower has no channel objects to update.
        pass
class LNWalletWatcher(LNWatcher):
    """Watcher attached to a wallet's LNWallet: keeps channel objects in
    sync with the chain and redeems sweepable outputs after a close."""

    def __init__(self, lnworker: 'LNWallet', network: 'Network'):
        LNWatcher.__init__(self, network)
        self.network = network
        self.lnworker = lnworker

    @ignore_exceptions
    @log_exceptions
    async def update_channel_state(self, *, funding_outpoint: str, funding_txid: str,
                                   funding_height: TxMinedInfo, closing_txid: str,
                                   closing_height: TxMinedInfo, keep_watching: bool) -> None:
        """Forward the on-chain situation to the Channel object (no-op if
        the outpoint does not belong to one of our channels)."""
        chan = self.lnworker.channel_by_txo(funding_outpoint)
        if not chan:
            return
        chan.update_onchain_state(funding_txid=funding_txid,
                                  funding_height=funding_height,
                                  closing_txid=closing_txid,
                                  closing_height=closing_height,
                                  keep_watching=keep_watching)
        await self.lnworker.on_channel_update(chan)

    async def do_breach_remedy(self, funding_outpoint, closing_tx, spenders):
        """Sweep our outputs of a closed channel.

        Returns True while the channel still needs watching (something is
        unswept or not yet deeply mined), False once it can be dropped."""
        chan = self.lnworker.channel_by_txo(funding_outpoint)
        if not chan:
            return False
        # detect who closed and set sweep_info
        sweep_info_dict = chan.sweep_ctx(closing_tx)
        # Nothing to sweep: just wait for the closing tx to be buried.
        keep_watching = False if sweep_info_dict else not self.is_deeply_mined(closing_tx.txid())
        self.logger.info(f'(chan {chan.get_id_for_log()}) sweep_info_dict length: {len(sweep_info_dict)}')
        # create and broadcast transaction
        for prevout, sweep_info in sweep_info_dict.items():
            name = sweep_info.name + ' ' + chan.get_id_for_log()
            spender_txid = spenders.get(prevout)
            if spender_txid is not None:
                spender_tx = self.db.get_transaction(spender_txid)
                if not spender_tx:
                    # Spending txid known but tx not fetched yet.
                    keep_watching = True
                    continue
                # An HTLC output was spent by its HTLC-tx; we may still be
                # able to sweep the HTLC-tx's own output.
                e_htlc_tx = chan.sweep_htlc(closing_tx, spender_tx)
                if e_htlc_tx:
                    spender2 = spenders.get(spender_txid + ':0')
                    if spender2:
                        self.logger.info(f'(chan {chan.get_id_for_log()}) htlc is already spent {name}: {prevout}')
                        keep_watching |= not self.is_deeply_mined(spender2)
                    else:
                        self.logger.info(f'(chan {chan.get_id_for_log()}) trying to redeem htlc {name}: {prevout}')
                        await self.try_redeem(spender_txid + ':0', e_htlc_tx, name)
                        keep_watching = True
                else:
                    self.logger.info(f'(chan {chan.get_id_for_log()}) outpoint already spent {name}: {prevout}')
                    keep_watching |= not self.is_deeply_mined(spender_txid)
            else:
                self.logger.info(f'(chan {chan.get_id_for_log()}) trying to redeem {name}: {prevout}')
                await self.try_redeem(prevout, sweep_info, name)
                keep_watching = True
        return keep_watching

    @log_exceptions
    async def try_redeem(self, prevout: str, sweep_info: 'SweepInfo', name: str) -> None:
        """Build the sweep tx for `prevout` and either broadcast it or, if
        a CLTV/CSV timelock is still running, queue it as a future tx."""
        prev_txid, prev_index = prevout.split(':')
        broadcast = True
        if sweep_info.cltv_expiry:
            local_height = self.network.get_local_height()
            remaining = sweep_info.cltv_expiry - local_height
            if remaining > 0:
                self.logger.info('waiting for {}: CLTV ({} > {}), prevout {}'
                                 .format(name, local_height, sweep_info.cltv_expiry, prevout))
                broadcast = False
        if sweep_info.csv_delay:
            prev_height = self.get_tx_height(prev_txid)
            remaining = sweep_info.csv_delay - prev_height.conf
            if remaining > 0:
                self.logger.info('waiting for {}: CSV ({} >= {}), prevout: {}'
                                 .format(name, prev_height.conf, sweep_info.csv_delay, prevout))
                broadcast = False
        tx = sweep_info.gen_tx()
        if tx is None:
            self.logger.info(f'{name} could not claim output: {prevout}, dust')
            # BUGFIX: without this return, execution fell through and
            # tx.txid() below raised AttributeError on None.
            return
        self.lnworker.wallet.set_label(tx.txid(), name)
        if broadcast:
            await self.network.try_broadcasting(tx, name)
        else:
            # it's OK to add local transaction, the fee will be recomputed
            try:
                tx_was_added = self.lnworker.wallet.add_future_tx(tx, remaining)
            except Exception as e:
                self.logger.info(f'could not add future tx: {name}. prevout: {prevout} {str(e)}')
                tx_was_added = False
            if tx_was_added:
                self.logger.info(f'added future tx: {name}. prevout: {prevout}')
                util.trigger_callback('wallet_updated', self.lnworker.wallet)
| mit |
izzi/ActivityTracker | tests/testActivity.py | 1 | 2088 | '''
Created on Aug 20, 2014
@author: valeriu
'''
import unittest
from datetime import datetime, timedelta
from activitytracker import Activity, ActivityError
class TestActivity(unittest.TestCase):
    """Unit tests for activitytracker.Activity: constructor validation,
    default start time/duration, and increment_duration behavior."""
    def setUp(self):
        # Fresh Activity with an empty window list for every test.
        self.activity = Activity([])
    def tearDown(self):
        self.activity = None
    def test_constructor_parameter_storage(self):
        self.assertEqual(self.activity._window_list, [], "Constructor parameter windows is ignored")
    def test_constructor_parameter_validation(self):
        # A list containing a dict without the required keys must be rejected.
        self.assertRaises(ActivityError, Activity, [{}])
    def test_validate_windows_empty_list_parameter(self):
        # Non-list argument must raise.
        self.assertRaises(ActivityError, self.activity.validate_windows, 3)
    def test_validate_windows_list_of_dictionary_parameter(self):
        # List elements must be dictionaries.
        self.assertRaises(ActivityError, self.activity.validate_windows, [3])
    def test_validate_windows_list_dictionary_keys(self):
        self.assertRaises(ActivityError, self.activity.validate_windows, [{}])
    def test_constructor_default_start_time(self):
        # Start time defaults to "now", checked within a +/- 1 second window.
        self.assertGreater (self.activity._start_time, datetime.now() - timedelta(seconds=1), "startTime in constructor not initialized wit current time")
        self.assertLess (self.activity._start_time, datetime.now() + timedelta(seconds=1), "startTime in constructor not initialized wit current time")
    def test_constructor_default_duration(self):
        self.assertEqual(self.activity._duration, 0, "Default duration in constructor must be 0")
    def test_increment_duration_param(self):
        self.activity.increment_duration(2)
        self.assertEqual(self.activity._duration, 2, "Function ignore parameter value")
    def test_increment_duration_default_param(self):
        self.activity.increment_duration()
        self.assertEqual(self.activity._duration, 1, "Default increment must be 1")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | gpl-3.0 |
neumerance/cloudloon2 | .venv/lib/python2.7/site-packages/nose/selector.py | 48 | 9090 | """
Test Selection
--------------
Test selection is handled by a Selector. The test loader calls the
appropriate selector method for each object it encounters that it
thinks may be a test.
"""
import logging
import os
import unittest
from nose.config import Config
from nose.util import split_test_name, src, getfilename, getpackage, ispackage
log = logging.getLogger(__name__)
__all__ = ['Selector', 'defaultSelector', 'TestAddress']
# for efficiency and easier mocking
op_join = os.path.join
op_basename = os.path.basename
op_exists = os.path.exists
op_splitext = os.path.splitext
op_isabs = os.path.isabs
op_abspath = os.path.abspath
class Selector(object):
    """Core test selector. Examines test candidates and determines whether,
    given the specified configuration, the test candidate should be selected
    as a test.

    NOTE(review): the truthiness tests on filter(None, ...) below rely on
    Python 2 filter() returning a list; on Python 3 filter() returns an
    always-truthy iterator, which would break matches()/wantDirectory().
    """
    def __init__(self, config):
        # Fall back to a default Config so a bare Selector() is usable.
        if config is None:
            config = Config()
        self.configure(config)
    def configure(self, config):
        # Cache the config fields consulted on every want* decision.
        self.config = config
        self.exclude = config.exclude
        self.ignoreFiles = config.ignoreFiles
        self.include = config.include
        self.plugins = config.plugins
        self.match = config.testMatch
    def matches(self, name):
        """Does the name match my requirements?
        To match, a name must match config.testMatch OR config.include
        and it must not match config.exclude
        """
        return ((self.match.search(name)
                 or (self.include and
                     filter(None,
                            [inc.search(name) for inc in self.include])))
                and ((not self.exclude)
                     or not filter(None,
                                   [exc.search(name) for exc in self.exclude])
                     ))
    def wantClass(self, cls):
        """Is the class a wanted test class?
        A class must be a unittest.TestCase subclass, or match test name
        requirements. Classes that start with _ are always excluded.
        """
        # An explicit __test__ attribute always wins over name heuristics.
        declared = getattr(cls, '__test__', None)
        if declared is not None:
            wanted = declared
        else:
            wanted = (not cls.__name__.startswith('_')
                      and (issubclass(cls, unittest.TestCase)
                           or self.matches(cls.__name__)))
        # Plugins get the final say; None means "no opinion".
        plug_wants = self.plugins.wantClass(cls)
        if plug_wants is not None:
            log.debug("Plugin setting selection of %s to %s", cls, plug_wants)
            wanted = plug_wants
        log.debug("wantClass %s? %s", cls, wanted)
        return wanted
    def wantDirectory(self, dirname):
        """Is the directory a wanted test directory?
        All package directories match, so long as they do not match exclude.
        All other directories must match test requirements.
        """
        tail = op_basename(dirname)
        if ispackage(dirname):
            wanted = (not self.exclude
                      or not filter(None,
                                    [exc.search(tail) for exc in self.exclude]
                                    ))
        else:
            # Non-package dirs are wanted if they look like tests or are
            # configured source dirs.
            wanted = (self.matches(tail)
                      or (self.config.srcDirs
                          and tail in self.config.srcDirs))
        plug_wants = self.plugins.wantDirectory(dirname)
        if plug_wants is not None:
            log.debug("Plugin setting selection of %s to %s",
                      dirname, plug_wants)
            wanted = plug_wants
        log.debug("wantDirectory %s? %s", dirname, wanted)
        return wanted
    def wantFile(self, file):
        """Is the file a wanted test file?
        The file must be a python source file and match testMatch or
        include, and not match exclude. Files that match ignore are *never*
        wanted, regardless of plugin, testMatch, include or exclude settings.
        """
        # never, ever load files that match anything in ignore
        # (.* _* and *setup*.py by default)
        base = op_basename(file)
        ignore_matches = [ ignore_this for ignore_this in self.ignoreFiles
                           if ignore_this.search(base) ]
        if ignore_matches:
            log.debug('%s matches ignoreFiles pattern; skipped',
                      base)
            return False
        # Executables are skipped unless explicitly enabled in config.
        if not self.config.includeExe and os.access(file, os.X_OK):
            log.info('%s is executable; skipped', file)
            return False
        dummy, ext = op_splitext(base)
        pysrc = ext == '.py'
        wanted = pysrc and self.matches(base)
        plug_wants = self.plugins.wantFile(file)
        if plug_wants is not None:
            log.debug("plugin setting want %s to %s", file, plug_wants)
            wanted = plug_wants
        log.debug("wantFile %s? %s", file, wanted)
        return wanted
    def wantFunction(self, function):
        """Is the function a test function?
        """
        try:
            # compat_func_name is set by nose for generated/wrapped tests.
            if hasattr(function, 'compat_func_name'):
                funcname = function.compat_func_name
            else:
                funcname = function.__name__
        except AttributeError:
            # not a function
            return False
        declared = getattr(function, '__test__', None)
        if declared is not None:
            wanted = declared
        else:
            wanted = not funcname.startswith('_') and self.matches(funcname)
        plug_wants = self.plugins.wantFunction(function)
        if plug_wants is not None:
            wanted = plug_wants
        log.debug("wantFunction %s? %s", function, wanted)
        return wanted
    def wantMethod(self, method):
        """Is the method a test method?
        """
        try:
            method_name = method.__name__
        except AttributeError:
            # not a method
            return False
        if method_name.startswith('_'):
            # never collect 'private' methods
            return False
        declared = getattr(method, '__test__', None)
        if declared is not None:
            wanted = declared
        else:
            wanted = self.matches(method_name)
        plug_wants = self.plugins.wantMethod(method)
        if plug_wants is not None:
            wanted = plug_wants
        log.debug("wantMethod %s? %s", method, wanted)
        return wanted
    def wantModule(self, module):
        """Is the module a test module?
        The tail of the module name must match test requirements. One exception:
        we always want __main__.
        """
        declared = getattr(module, '__test__', None)
        if declared is not None:
            wanted = declared
        else:
            wanted = self.matches(module.__name__.split('.')[-1]) \
                     or module.__name__ == '__main__'
        plug_wants = self.plugins.wantModule(module)
        if plug_wants is not None:
            wanted = plug_wants
        log.debug("wantModule %s? %s", module, wanted)
        return wanted
# Default selector class used by the test loader; replaceable by plugins.
defaultSelector = Selector
class TestAddress(object):
    """A test address represents a user's request to run a particular
    test. The user may specify a filename or module (or neither),
    and/or a callable (a class, function, or method). The naming
    format for test addresses is:
    filename_or_module:callable
    Filenames that are not absolute will be made absolute relative to
    the working dir.
    The filename or module part will be considered a module name if it
    doesn't look like a file, that is, if it doesn't exist on the file
    system and it doesn't contain any directory separators and it
    doesn't end in .py.
    Callables may be a class name, function name, method name, or
    class.method specification.
    """
    def __init__(self, name, workingDir=None):
        if workingDir is None:
            workingDir = os.getcwd()
        self.name = name
        self.workingDir = workingDir
        # split_test_name parses "file_or_module:callable" into parts;
        # any of the three may come back None.
        self.filename, self.module, self.call = split_test_name(name)
        log.debug('Test name %s resolved to file %s, module %s, call %s',
                  name, self.filename, self.module, self.call)
        if self.filename is None:
            if self.module is not None:
                # Resolve the module to its file relative to workingDir.
                self.filename = getfilename(self.module, self.workingDir)
        if self.filename:
            # Map compiled files back to their source and absolutize.
            self.filename = src(self.filename)
            if not op_isabs(self.filename):
                self.filename = op_abspath(op_join(workingDir,
                                                   self.filename))
            if self.module is None:
                self.module = getpackage(self.filename)
        log.debug(
            'Final resolution of test name %s: file %s module %s call %s',
            name, self.filename, self.module, self.call)
    def totuple(self):
        # Convenience form used by the loader.
        return (self.filename, self.module, self.call)
    def __str__(self):
        return self.name
    def __repr__(self):
        return "%s: (%s, %s, %s)" % (self.name, self.filename,
                                     self.module, self.call)
| apache-2.0 |
h3biomed/ansible | lib/ansible/modules/network/ingate/ig_config.py | 16 | 15968 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Ingate Systems AB
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: ig_config
short_description: Manage the configuration database on an Ingate SBC.
description:
- Manage the configuration database on an Ingate SBC.
version_added: 2.8
extends_documentation_fragment: ingate
options:
add:
description:
- Add a row to a table.
type: bool
delete:
description:
- Delete all rows in a table or a specific row.
type: bool
get:
description:
- Return all rows in a table or a specific row.
type: bool
modify:
description:
- Modify a row in a table.
type: bool
revert:
description:
- Reset the preliminary configuration.
type: bool
factory:
description:
- Reset the preliminary configuration to its factory defaults.
type: bool
store:
description:
- Store the preliminary configuration.
type: bool
no_response:
description:
- Expect no response when storing the preliminary configuration.
Refer to the C(store) option.
type: bool
return_rowid:
description:
- Get rowid(s) from a table where the columns match.
type: bool
download:
description:
- Download the configuration database from the unit.
type: bool
store_download:
description:
- If the downloaded configuration should be stored on disk.
Refer to the C(download) option.
type: bool
default: false
path:
description:
- Where in the filesystem to store the downloaded configuration.
Refer to the C(download) option.
filename:
description:
- The name of the file to store the downloaded configuration in.
Refer to the C(download) option.
table:
description:
- The name of the table.
rowid:
description:
- A row id.
type: int
columns:
description:
- A dict containing column names/values.
notes:
- If C(store_download) is set to True, and C(path) and C(filename) is omitted,
the file will be stored in the current directory with an automatic filename.
author:
- Ingate Systems AB (@ingatesystems)
'''
EXAMPLES = '''
- name: Add/remove DNS servers
hosts: 192.168.1.1
connection: local
vars:
client_rw:
version: v1
address: "{{ inventory_hostname }}"
scheme: http
username: alice
password: foobar
tasks:
- name: Load factory defaults
ig_config:
client: "{{ client_rw }}"
factory: true
register: result
- debug:
var: result
- name: Revert to last known applied configuration
ig_config:
client: "{{ client_rw }}"
revert: true
register: result
- debug:
var: result
- name: Change the unit name
ig_config:
client: "{{ client_rw }}"
modify: true
table: misc.unitname
columns:
unitname: "Test Ansible"
register: result
- debug:
var: result
- name: Add a DNS server
ig_config:
client: "{{ client_rw }}"
add: true
table: misc.dns_servers
columns:
server: 192.168.1.21
register: result
- debug:
var: result
- name: Add a DNS server
ig_config:
client: "{{ client_rw }}"
add: true
table: misc.dns_servers
columns:
server: 192.168.1.22
register: result
- debug:
var: result
- name: Add a DNS server
ig_config:
client: "{{ client_rw }}"
add: true
table: misc.dns_servers
columns:
server: 192.168.1.23
register: last_dns
- debug:
var: last_dns
- name: Modify the last added DNS server
ig_config:
client: "{{ client_rw }}"
modify: true
table: misc.dns_servers
rowid: "{{ last_dns['add'][0]['id'] }}"
columns:
server: 192.168.1.24
register: result
- debug:
var: result
- name: Return the last added DNS server
ig_config:
client: "{{ client_rw }}"
get: true
table: misc.dns_servers
rowid: "{{ last_dns['add'][0]['id'] }}"
register: result
- debug:
var: result
- name: Remove last added DNS server
ig_config:
client: "{{ client_rw }}"
delete: true
table: misc.dns_servers
rowid: "{{ last_dns['add'][0]['id'] }}"
register: result
- debug:
var: result
- name: Return the all rows from table misc.dns_servers
ig_config:
client: "{{ client_rw }}"
get: true
table: misc.dns_servers
register: result
- debug:
var: result
- name: Remove remaining DNS servers
ig_config:
client: "{{ client_rw }}"
delete: true
table: misc.dns_servers
register: result
- debug:
var: result
- name: Get rowid for interface eth0
ig_config:
client: "{{ client_rw }}"
return_rowid: true
table: network.local_nets
columns:
interface: eth0
register: result
- debug:
var: result
- name: Store the preliminary configuration
ig_config:
client: "{{ client_rw }}"
store: true
register: result
- debug:
var: result
- name: Do backup of the configuration database
ig_config:
client: "{{ client_rw }}"
download: true
store_download: true
register: result
- debug:
var: result
'''
RETURN = '''
add:
description: A list containing information about the added row
returned: when C(add) is yes and success
type: complex
contains:
href:
description: The REST API URL to the added row
returned: success
type: str
sample: http://192.168.1.1/api/v1/misc/dns_servers/2
data:
description: Column names/values
returned: success
type: complex
sample: {'number': '2', 'server': '10.48.254.33'}
id:
description: The row id
returned: success
type: int
sample: 22
delete:
description: A list containing information about the deleted row(s)
returned: when C(delete) is yes and success
type: complex
contains:
table:
description: The name of the table
returned: success
type: str
sample: misc.dns_servers
data:
description: Column names/values
returned: success
type: complex
sample: {'number': '2', 'server': '10.48.254.33'}
id:
description: The row id
returned: success
type: int
sample: 22
get:
description: A list containing information about the row(s)
returned: when C(get) is yes and success
type: complex
contains:
table:
description: The name of the table
returned: success
type: str
      sample: misc.dns_servers
href:
description: The REST API URL to the row
returned: success
type: str
sample: http://192.168.1.1/api/v1/misc/dns_servers/1
data:
description: Column names/values
returned: success
type: complex
sample: {'number': '2', 'server': '10.48.254.33'}
id:
description: The row id
returned: success
type: int
sample: 1
modify:
description: A list containing information about the modified row
returned: when C(modify) is yes and success
type: complex
contains:
table:
description: The name of the table
returned: success
type: str
      sample: misc.dns_servers
href:
description: The REST API URL to the modified row
returned: success
type: str
sample: http://192.168.1.1/api/v1/misc/dns_servers/1
data:
description: Column names/values
returned: success
type: complex
sample: {'number': '2', 'server': '10.48.254.33'}
id:
description: The row id
returned: success
type: int
sample: 10
revert:
description: A command status message
returned: when C(revert) is yes and success
type: complex
contains:
msg:
description: The command status message
returned: success
type: str
sample: reverted the configuration to the last applied configuration.
factory:
description: A command status message
returned: when C(factory) is yes and success
type: complex
contains:
msg:
description: The command status message
returned: success
type: str
sample: reverted the configuration to the factory configuration.
store:
description: A command status message
returned: when C(store) is yes and success
type: complex
contains:
msg:
description: The command status message
returned: success
type: str
sample: Successfully applied and saved the configuration.
return_rowid:
description: The matched row id(s).
returned: when C(return_rowid) is yes and success
type: list
sample: [1, 3]
download:
description: Configuration database and meta data
returned: when C(download) is yes and success
type: complex
contains:
config:
description: The configuration database
returned: success
type: str
filename:
description: A suggested name for the configuration
returned: success
type: str
sample: testname_2018-10-01T214040.cfg
mimetype:
description: The mimetype
returned: success
type: str
sample: application/x-config-database
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.ingate.common import (ingate_argument_spec,
ingate_create_client)
try:
from ingate import ingatesdk
HAS_INGATESDK = True
except ImportError:
HAS_INGATESDK = False
def make_request(module):
    """Dispatch the single requested command against the Ingate unit.

    The commands are mutually exclusive (enforced in main()), so the first
    truthy command parameter wins.

    :param module: the AnsibleModule carrying the validated parameters
    :returns: tuple ``(changed, command_name, response)``; ``command_name``
              is ``''`` and ``response`` is ``{}`` when no command matched
    """
    # Create an API client and authenticate.
    api_client = ingate_create_client(**module.params)

    if module.params.get('add'):
        # Add a row to a table.
        table = module.params['table']
        columns = module.params['columns']
        response = api_client.add_row(table, **columns)
        return True, 'add', response
    elif module.params.get('delete'):
        # Delete a single row (when rowid is given) or the whole table.
        changed = False
        table = module.params['table']
        rowid = module.params.get('rowid')
        if rowid:
            response = api_client.delete_row(table, rowid=rowid)
        else:
            response = api_client.delete_table(table)
        if response:
            changed = True
        return changed, 'delete', response
    elif module.params.get('get'):
        # Get the contents of a table, or of a single row.
        # BUGFIX: 'changed' was previously left unbound when the response
        # was empty, raising UnboundLocalError instead of returning.
        changed = False
        table = module.params['table']
        rowid = module.params.get('rowid')
        if rowid:
            response = api_client.dump_row(table, rowid=rowid)
        else:
            response = api_client.dump_table(table)
        if response:
            changed = True
        return changed, 'get', response
    elif module.params.get('modify'):
        # Modify a row by id, or the single row of a one-row table.
        # BUGFIX: initialize 'changed' (same unbound-variable issue as 'get').
        changed = False
        table = module.params['table']
        columns = module.params['columns']
        rowid = module.params.get('rowid')
        if rowid:
            response = api_client.modify_row(table, rowid=rowid, **columns)
        else:
            response = api_client.modify_single_row(table, **columns)
        if response:
            changed = True
        return changed, 'modify', response
    elif module.params.get('revert'):
        # Revert pending edits (discard the preliminary configuration).
        response = api_client.revert_edits()
        if response:
            response = response[0]['revert-edits']
        return True, 'revert', response
    elif module.params.get('factory'):
        # Load factory defaults.
        response = api_client.load_factory()
        if response:
            response = response[0]['load-factory']
        return True, 'factory', response
    elif module.params.get('store'):
        # Store (apply and save) the preliminary configuration.
        no_response = module.params.get('no_response')
        response = api_client.store_edit(no_response=no_response)
        if response:
            response = response[0]['store-edit']
        return True, 'store', response
    elif module.params.get('return_rowid'):
        # Find row id(s) whose columns match the given name/value pairs.
        table = module.params['table']
        columns = module.params['columns']
        response = api_client.dump_table(table)
        rowids = []
        for row in response:
            match = False
            for (name, value) in columns.items():
                # NOTE: a requested column that is absent from the row is
                # skipped rather than disqualifying the row (existing
                # behavior, preserved).
                if name not in row['data']:
                    continue
                if not row['data'][name] == value:
                    match = False
                    break
                else:
                    match = True
            if match:
                rowids.append(row['id'])
        return False, 'return_rowid', rowids
    elif module.params.get('download'):
        # Download the configuration database.
        store = module.params.get('store_download')
        path = module.params.get('path')
        filename = module.params.get('filename')
        response = api_client.download_config(store=store, path=path,
                                              filename=filename)
        if response:
            response = response[0]['download-config']
        return False, 'download', response
    # No command matched (should not happen given required_one_of in main()).
    return False, '', {}
def main():
    """Module entry point: build the argument spec, validate the parameter
    combinations, run the request and report the outcome to Ansible."""
    # All top-level commands; exactly one must be supplied, never several.
    commands = ('add', 'delete', 'get', 'modify', 'revert', 'factory',
                'store', 'return_rowid', 'download')
    argument_spec = ingate_argument_spec(
        add=dict(type='bool'),
        delete=dict(type='bool'),
        get=dict(type='bool'),
        modify=dict(type='bool'),
        revert=dict(type='bool'),
        factory=dict(type='bool'),
        store=dict(type='bool'),
        no_response=dict(type='bool', default=False),
        return_rowid=dict(type='bool'),
        download=dict(type='bool'),
        store_download=dict(type='bool', default=False),
        path=dict(),
        filename=dict(),
        table=dict(),
        rowid=dict(type='int'),
        columns=dict(type='dict'),
    )
    mutually_exclusive = [commands]
    required_one_of = [list(commands)]
    # Table-oriented commands additionally need the table (and sometimes
    # the columns) to operate on.
    required_if = [('add', True, ['table', 'columns']),
                   ('delete', True, ['table']),
                   ('get', True, ['table']),
                   ('modify', True, ['table', 'columns']),
                   ('return_rowid', True, ['table', 'columns'])]
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           required_if=required_if,
                           required_one_of=required_one_of,
                           supports_check_mode=False)
    if not HAS_INGATESDK:
        module.fail_json(msg='The Ingate Python SDK module is required')

    result = dict(changed=False)
    try:
        changed, command, response = make_request(module)
        if response and command:
            result[command] = response
        result['changed'] = changed
    except ingatesdk.SdkError as e:
        module.fail_json(msg=str(e))
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
gnu-sandhi/gnuradio | grc/gui/Connection.py | 34 | 6129 | """
Copyright 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import Utils
from Element import Element
import Colors
from Constants import CONNECTOR_ARROW_BASE, CONNECTOR_ARROW_HEIGHT
class Connection(Element):
    """
    A graphical connection for ports.

    The connection has 2 parts, the arrow and the wire.
    The coloring of the arrow and wire exposes the status of 3 states:
    enabled/disabled, valid/invalid, highlighted/non-highlighted.
    The wire coloring exposes the enabled and highlighted states.
    The arrow coloring exposes the enabled and valid states.
    """

    def __init__(self): Element.__init__(self)

    def get_coordinate(self):
        """
        Get the 0,0 coordinate.
        Coordinates are irrelevant in connection.
        @return 0, 0
        """
        return (0, 0)

    def get_rotation(self):
        """
        Get the 0 degree rotation.
        Rotations are irrelevant in connection.
        @return 0
        """
        return 0

    def create_shapes(self):
        """Precalculate relative coordinates for the wire and the arrow."""
        Element.create_shapes(self)
        # invalidate the cached endpoint state so draw() recomputes everything
        self._sink_rot = None
        self._source_rot = None
        self._sink_coor = None
        self._source_coor = None
        #get the source coordinate (wire endpoint just past the source connector)
        connector_length = self.get_source().get_connector_length()
        self.x1, self.y1 = Utils.get_rotated_coordinate((connector_length, 0), self.get_source().get_rotation())
        #get the sink coordinate; the arrow height is added so the wire stops
        #at the arrow base instead of overlapping the arrow head
        connector_length = self.get_sink().get_connector_length() + CONNECTOR_ARROW_HEIGHT
        self.x2, self.y2 = Utils.get_rotated_coordinate((-connector_length, 0), self.get_sink().get_rotation())
        #build the arrow: a triangle with its tip at the sink connector,
        #rotated to match the sink port's rotation
        self.arrow = [(0, 0),
            Utils.get_rotated_coordinate((-CONNECTOR_ARROW_HEIGHT, -CONNECTOR_ARROW_BASE/2), self.get_sink().get_rotation()),
            Utils.get_rotated_coordinate((-CONNECTOR_ARROW_HEIGHT, CONNECTOR_ARROW_BASE/2), self.get_sink().get_rotation()),
        ]
        self._update_after_move()
        #the arrow color encodes the enabled/valid states
        if not self.get_enabled(): self._arrow_color = Colors.CONNECTION_DISABLED_COLOR
        elif not self.is_valid(): self._arrow_color = Colors.CONNECTION_ERROR_COLOR
        else: self._arrow_color = Colors.CONNECTION_ENABLED_COLOR

    def _update_after_move(self):
        """Calculate absolute line coordinates after either endpoint moved."""
        self.clear() #FIXME do i want this here?
        #source connector: short stub from the port out to (x1, y1)
        source = self.get_source()
        X, Y = source.get_connector_coordinate()
        x1, y1 = self.x1 + X, self.y1 + Y
        self.add_line((x1, y1), (X, Y))
        #sink connector: short stub from the port out to (x2, y2)
        sink = self.get_sink()
        X, Y = sink.get_connector_coordinate()
        x2, y2 = self.x2 + X, self.y2 + Y
        self.add_line((x2, y2), (X, Y))
        #adjust arrow: translate the precomputed triangle to the sink position
        #(note: X, Y still hold the sink connector coordinate here)
        self._arrow = [(x+X, y+Y) for x,y in self.arrow]
        #add the horizontal and vertical lines in this connection
        if abs(source.get_connector_direction() - sink.get_connector_direction()) == 180:
            #ports face each other: 2 possible point sets to create a 3-line connector
            mid_x, mid_y = (x1 + x2)/2.0, (y1 + y2)/2.0
            points = [((mid_x, y1), (mid_x, y2)), ((x1, mid_y), (x2, mid_y))]
            #source connector -> points[0][0] should be in the direction of source (if possible)
            if Utils.get_angle_from_coordinates((x1, y1), points[0][0]) != source.get_connector_direction(): points.reverse()
            #points[0][0] -> sink connector should not be in the direction of sink
            if Utils.get_angle_from_coordinates(points[0][0], (x2, y2)) == sink.get_connector_direction(): points.reverse()
            #points[0][0] -> source connector should not be in the direction of source
            if Utils.get_angle_from_coordinates(points[0][0], (x1, y1)) == source.get_connector_direction(): points.reverse()
            #create 3-line connector (the reversals above pick the candidate
            #that avoids doubling back over either port)
            p1, p2 = map(int, points[0][0]), map(int, points[0][1])
            self.add_line((x1, y1), p1)
            self.add_line(p1, p2)
            self.add_line((x2, y2), p2)
        else:
            #ports at right angles: 2 possible points to create a right-angled connector
            points = [(x1, y2), (x2, y1)]
            #source connector -> points[0] should be in the direction of source (if possible)
            if Utils.get_angle_from_coordinates((x1, y1), points[0]) != source.get_connector_direction(): points.reverse()
            #points[0] -> sink connector should not be in the direction of sink
            if Utils.get_angle_from_coordinates(points[0], (x2, y2)) == sink.get_connector_direction(): points.reverse()
            #points[0] -> source connector should not be in the direction of source
            if Utils.get_angle_from_coordinates(points[0], (x1, y1)) == source.get_connector_direction(): points.reverse()
            #create right-angled connector
            self.add_line((x1, y1), points[0])
            self.add_line((x2, y2), points[0])

    def draw(self, gc, window):
        """
        Draw the connection.
        @param gc the graphics context
        @param window the gtk window to draw on
        """
        sink = self.get_sink()
        source = self.get_source()
        #check for changes: a rotation change requires a full reshape, a pure
        #translation only needs the cheaper coordinate update
        if self._sink_rot != sink.get_rotation() or self._source_rot != source.get_rotation(): self.create_shapes()
        elif self._sink_coor != sink.get_coordinate() or self._source_coor != source.get_coordinate(): self._update_after_move()
        #cache values so the next draw() can detect changes
        self._sink_rot = sink.get_rotation()
        self._source_rot = source.get_rotation()
        self._sink_coor = sink.get_coordinate()
        self._source_coor = source.get_coordinate()
        #draw the wire; its color encodes the highlighted/enabled states
        if self.is_highlighted(): border_color = Colors.HIGHLIGHT_COLOR
        elif self.get_enabled(): border_color = Colors.CONNECTION_ENABLED_COLOR
        else: border_color = Colors.CONNECTION_DISABLED_COLOR
        Element.draw(self, gc, window, bg_color=None, border_color=border_color)
        #draw arrow on sink port using the color computed in create_shapes()
        gc.set_foreground(self._arrow_color)
        window.draw_polygon(gc, True, self._arrow)
| gpl-3.0 |
Dziolas/invenio | modules/miscutil/lib/miscutil_config.py | 27 | 1264 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Configuration file for miscutil module.
- Contains standard error messages for errorlib
e.g. No error message given, etc.
"""
__revision__ = "$Id$"
# Exceptions: errors
class InvenioMiscUtilError(Exception):
    """A generic error for MiscUtil.

    Keeps the legacy ``message`` attribute while also initialising the
    base class so ``args`` is populated and the exception pickles and
    chains correctly.
    """

    def __init__(self, message):
        """Initialisation.

        :param message: human-readable description of the error
        """
        # Call the base initialiser so that self.args == (message,);
        # backward compatible with callers reading self.message.
        Exception.__init__(self, message)
        self.message = message

    def __str__(self):
        """String representation (repr of the message, as before)."""
        return repr(self.message)
| gpl-2.0 |
tako0910/android_kernel_htc_m7wlj | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """Tree representation of the kernel function call stack.

    A function with no parent in the trace (interrupt, syscall, kernel
    thread...) is attached to the virtual parent ROOT.
    """
    ROOT = None

    def __init__(self, func, time = None, parent = None):
        self._func = func
        self._time = time
        self._parent = CallTree.ROOT if parent is None else parent
        self._children = []

    def calls(self, func, calltime):
        """Record that this function called *func* at *calltime*.

        @return: A reference to the newly created child node.
        """
        node = CallTree(func, calltime, self)
        self._children.append(node)
        return node

    def getParent(self, func):
        """Return the nearest ancestor whose name is *func*.

        When no such ancestor exists, a new child of ROOT is created
        and returned instead.
        """
        node = self
        while node != CallTree.ROOT and node._func != func:
            node = node._parent
        if node == CallTree.ROOT:
            return CallTree.ROOT.calls(func, None)
        return node

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        # Render this node, then recurse into the children with an
        # extended branch prefix; the prefix loses its final connector
        # character when this node is the last child of its parent.
        if self._time is not None:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        else:
            s = "%s----%s\n" % (branch, self._func)
        if lastChild:
            branch = branch[:-1] + " "
        total = len(self._children)
        for position, child in enumerate(self._children):
            s += child.__toString(branch + " |", position == total - 1)
        return s
class BrokenLineException(Exception):
    """Raised when the final line is incomplete because the pipe broke;
    processing stops and that line is ignored."""
class CommentLineException(Exception):
    """Raised for comment lines (as found at the beginning of the trace
    file); such lines are simply skipped."""
def parseLine(line):
    """Parse one line of the raw function trace.

    Returns a (calltime, callee, caller) tuple, or raises
    CommentLineException for comment lines and BrokenLineException for
    lines that do not match the expected format (e.g. a truncated final
    line).
    """
    stripped = line.strip()
    if stripped.startswith("#"):
        raise CommentLineException
    match = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", stripped)
    if match is None:
        raise BrokenLineException
    return match.group(1, 2, 3)
def main():
    """Read a raw function trace from stdin and print it as a call tree."""
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT
    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # An incomplete final line means the pipe broke; stop here.
            break
        except CommentLineException:
            # Header/comment lines are simply skipped.
            continue
        # Rewind to the caller's node (creating it under ROOT if it has no
        # visible parent), then descend into the callee.
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)
    print CallTree.ROOT

if __name__ == "__main__":
    main()
| gpl-2.0 |
abhisg/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)


def pdf(x):
    """Evaluate the sampling density at *x*.

    An equal-weight mixture of two zero-mean normals: a narrow one
    (scale 0.25/e) and a wide one (scale 4/e).
    """
    narrow = stats.norm(scale=0.25 / e)
    wide = stats.norm(scale=4 / e)
    return 0.5 * (narrow.pdf(x) + wide.pdf(x))
# Draw the 30k-point cloud. NOTE: the RNG call order (y, x, z) is fixed by
# the seed above; reordering these lines would change the sampled points.
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))

# Per-point density used only for coloring the scatter plot; z is squashed
# (factor 5) so the cloud is very flat in that direction.
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z

# Mix the axes so the flat direction is not axis-aligned.
a = x + y
b = 2 * y
c = a - b + z

# Normalize a and b to comparable variance.
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
    """Render one 3D view of the point cloud together with its PCA plane.

    :param fig_num: matplotlib figure number
    :param elev: camera elevation angle
    :param azim: camera azimuth angle
    """
    figure = plt.figure(fig_num, figsize=(4, 3))
    plt.clf()
    axes = Axes3D(figure, rect=[0, 0, .95, 1], elev=elev, azim=azim)

    # Subsample every 10th point to keep the scatter readable.
    axes.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)

    samples = np.c_[a, b, c]
    # Using SciPy's SVD, this would be:
    #   _, pca_score, V = scipy.linalg.svd(samples, full_matrices=False)
    pca = PCA(n_components=3)
    pca.fit(samples)
    pca_score = pca.explained_variance_ratio_
    V = pca.components_

    # NOTE(review): the first assignment is immediately overwritten below —
    # kept as in the original example; only the 3 * V.T scaling is used.
    x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
    x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T

    # Build a 2x2 grid spanning the plane of the two leading components.
    x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
    y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
    z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
    x_pca_plane.shape = (2, 2)
    y_pca_plane.shape = (2, 2)
    z_pca_plane.shape = (2, 2)
    axes.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
    axes.w_xaxis.set_ticklabels([])
    axes.w_yaxis.set_ticklabels([])
    axes.w_zaxis.set_ticklabels([])
# Two camera angles: one looking along the flat direction of the cloud,
# one looking across it.
elev = -40
azim = -80
plot_figs(1, elev, azim)

elev = 30
azim = 20
plot_figs(2, elev, azim)

plt.show()
| bsd-3-clause |
kadashu/satori | satori-rules/plugin/libs/requests/packages/chardet/chardistribution.py | 2755 | 9226 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
# Enough characters to draw a conclusion without seeing all the data.
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
# Below this many frequent characters, no confidence can be given.
MINIMUM_DATA_THRESHOLD = 3


class CharDistributionAnalysis:
    """Base class: character frequency-distribution analysis for one
    encoding family. Subclasses supply the frequency table and the
    byte-pair -> order conversion via get_order()."""

    def __init__(self):
        # Mapping table from char order (see get_order()) to frequency order.
        self._mCharToFreqOrder = None
        self._mTableSize = None  # size of the table above
        # Language-dependent constant used when computing confidence; see
        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        # for further detail.
        self._mTypicalDistributionRatio = None
        self.reset()

    def reset(self):
        """Reset the analyser, clearing any accumulated state."""
        self._mDone = False       # True once a conclusion has been made
        self._mTotalChars = 0     # total characters encountered
        self._mFreqChars = 0      # characters whose frequency order is < 512

    def feed(self, aBuf, aCharLen):
        """Feed one character of known byte length."""
        # Only 2-byte characters participate in the distribution analysis.
        order = self.get_order(aBuf) if aCharLen == 2 else -1
        if order < 0:
            return
        self._mTotalChars += 1
        if order < self._mTableSize and self._mCharToFreqOrder[order] < 512:
            self._mFreqChars += 1

    def get_confidence(self):
        """Return a confidence value based on the data seen so far."""
        # Without characters in our consideration range, answer negatively.
        if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
            return SURE_NO
        if self._mTotalChars != self._mFreqChars:
            r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
                 * self._mTypicalDistributionRatio))
            if r < SURE_YES:
                return r
        # Normalize confidence: never claim 100% certainty.
        return SURE_YES

    def got_enough_data(self):
        # A conclusion can be drawn before all the data has been seen.
        return self._mTotalChars > ENOUGH_DATA_THRESHOLD

    def get_order(self, aBuf):
        # Subclasses convert an encoded character into a numeric "order" so
        # that multiple encodings of a language can share one frequency
        # table. The base class recognises nothing.
        return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for the EUC-TW encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCTWCharToFreqOrder
        self._mTableSize = EUCTW_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-TW characters of interest: lead byte 0xC4-0xFE, trail byte
        # 0xA1-0xFE; validation was already done by the state machine.
        lead = wrap_ord(aBuf[0])
        if lead < 0xC4:
            return -1
        return 94 * (lead - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for the EUC-KR encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCKRCharToFreqOrder
        self._mTableSize = EUCKR_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-KR characters of interest: lead byte 0xB0-0xFE, trail byte
        # 0xA1-0xFE; validation was already done by the state machine.
        lead = wrap_ord(aBuf[0])
        if lead < 0xB0:
            return -1
        return 94 * (lead - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for the GB2312 encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = GB2312CharToFreqOrder
        self._mTableSize = GB2312_TABLE_SIZE
        self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # GB2312 characters of interest: lead byte 0xB0-0xFE, trail byte
        # 0xA1-0xFE; validation was already done by the state machine.
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if lead >= 0xB0 and trail >= 0xA1:
            return 94 * (lead - 0xB0) + trail - 0xA1
        return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for the Big5 encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = Big5CharToFreqOrder
        self._mTableSize = BIG5_TABLE_SIZE
        self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Big5 characters of interest: lead byte 0xA4-0xFE; trail byte in
        # 0x40-0x7E or 0xA1-0xFE; validation already done by the state
        # machine.
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if lead < 0xA4:
            return -1
        if trail >= 0xA1:
            return 157 * (lead - 0xA4) + trail - 0xA1 + 63
        return 157 * (lead - 0xA4) + trail - 0x40
class SJISDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for the Shift_JIS encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Shift_JIS characters of interest: lead byte 0x81-0x9F or
        # 0xE0-0xEF; trail byte 0x40-0x7E or 0x81-0xFE; validation already
        # done by the state machine.
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if 0x81 <= lead <= 0x9F:
            base = 188 * (lead - 0x81)
        elif 0xE0 <= lead <= 0xEF:
            base = 188 * (lead - 0xE0 + 31)
        else:
            return -1
        # Trail bytes above 0x7F are rejected (same as the original
        # post-hoc override of the computed order).
        if trail > 0x7F:
            return -1
        return base + trail - 0x40
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for the EUC-JP encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-JP characters of interest: lead byte 0xA0-0xFE, trail byte
        # 0xA1-0xFE. NOTE(review): the guard accepts 0xA0 but the formula
        # is anchored at 0xA1, so a 0xA0 lead yields a negative order —
        # preserved as-is; confirm against upstream chardet if suspicious.
        lead = wrap_ord(aBuf[0])
        if lead < 0xA0:
            return -1
        return 94 * (lead - 0xA1) + wrap_ord(aBuf[1]) - 0xA1
| apache-2.0 |
fhaoquan/kbengine | kbe/res/scripts/common/Lib/logging/handlers.py | 63 | 55810 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, os, pickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
try:
import threading
except ImportError: #pragma: no cover
threading = None
#
# Default ports used by the network logging handlers defined later in this
# module (socket/datagram handlers, HTTP handler, SOAP) plus the
# conventional syslog ports.
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514  # conventional syslog port (UDP)
SYSLOG_TCP_PORT = 514  # conventional syslog port (TCP)

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.

    Not meant to be instantiated directly. Instead, use
    RotatingFileHandler or TimedRotatingFileHandler.
    """

    def __init__(self, filename, mode, encoding=None, delay=False):
        """
        Use the specified filename for streamed logging.
        """
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding
        # Optional user hooks: see rotation_filename() and rotate().
        self.namer = None
        self.rotator = None

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, performing a rollover first when
        shouldRollover() says one is due.
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Modify the filename of a log file when rotating.

        The 'namer' attribute, if callable, is given the default name and
        may return a customized one; otherwise the name is unchanged.

        :param default_name: The default name for the log file.
        """
        if callable(self.namer):
            return self.namer(default_name)
        return default_name

    def rotate(self, source, dest):
        """
        When rotating, rotate the current log.

        The 'rotator' attribute, if callable, is given the source and dest
        names; otherwise the source is simply renamed to the destination.

        :param source: The source filename, normally the base filename,
                       e.g. 'test.log'.
        :param dest:   The destination filename, normally what the source
                       rotates to, e.g. 'test.log.1'.
        """
        if callable(self.rotator):
            self.rotator(source, dest)
            return
        # Issue 18940: with delay=True the file may never have been created.
        if os.path.exists(source):
            os.rename(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, switching from one file to the
    next when the current file reaches a certain size.
    """

    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. Specify non-zero maxBytes
        and backupCount to roll the file over at a predetermined size:
        whenever the current log file nears maxBytes in length, it is
        closed and renamed to "app.log.1", with existing backups shifted
        to "app.log.2" ... up to backupCount; writing always continues in
        the base file. If maxBytes is zero, rollover never occurs.
        """
        # Rollover only makes sense in append mode: respecting e.g. 'w'
        # would truncate the log on every run of the calling application.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift the existing backups up by one, oldest first.
            for index in reversed(range(1, self.backupCount)):
                src = self.rotation_filename("%s.%d" % (self.baseFilename, index))
                dst = self.rotation_filename("%s.%d" % (self.baseFilename, index + 1))
                if os.path.exists(src):
                    if os.path.exists(dst):
                        os.remove(dst)
                    os.rename(src, dst)
            first_backup = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(first_backup):
                os.remove(first_backup)
            self.rotate(self.baseFilename, first_backup)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Return 1 if emitting *record* would push the file past maxBytes,
        0 otherwise.
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s\n" % self.format(record)
            # Seek to the end explicitly: a non-posix-compliant Windows
            # feature makes tell() unreliable without it.
            self.stream.seek(0, 2)
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.
    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None):
        # 'when' (case-insensitive) selects the base unit: 'S', 'M', 'H',
        # 'D', 'midnight' or 'W0'-'W6'; 'interval' multiplies that unit.
        # 'atTime' (a datetime.time) pins the rollover moment for midnight
        # and weekly rotation; 'utc' selects gmtime over localtime.
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)
        # extMatch is used by getFilesToDelete() to recognise previously
        # rotated files by their date/time suffix.
        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # If the file already exists, base the first rollover on its
        # modification time rather than on "now".
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)
    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.

        Returns an epoch timestamp (seconds) for the next rollover.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)
            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts. There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result
    def shouldRollover(self, record):
        """
        Determine if rollover should occur.
        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        return 0
    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.
        More specific than the earlier method, which just used glob.glob().

        Keeps the newest backupCount rotated files; returns the full paths
        of all older rotated files (identified via the extMatch regex).
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result
    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            # If DST flipped between the interval start and now, shift by an
            # hour so the suffix names the interval start in local time.
            if dstNow != dstThen:
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        # Guard against a clock that moved forwards past several intervals.
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.
    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.
    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        # (device, inode) identity of the open stream; (-1, -1) while no
        # stream is open (e.g. when delay=True and nothing is emitted yet).
        self.dev, self.ino = -1, -1
        self._statstream()
    def _statstream(self):
        # Record the identity of the currently open stream so emit() can
        # detect later that the path refers to a different file.
        if self.stream:
            sres = os.fstat(self.stream.fileno())
            self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
    def emit(self, record):
        """
        Emit a record.
        First check if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        # Reduce the chance of race conditions by stat'ing by path only
        # once and then fstat'ing our new fd if we opened a new log stream.
        # See issue #14632: Thanks to John Mulligan for the problem report
        # and patch.
        try:
            # stat the file by path, checking for existence
            sres = os.stat(self.baseFilename)
        except FileNotFoundError:
            sres = None
        # compare file system stat with that of our stream file handle
        if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
            if self.stream is not None:
                # we have an open file handle, clean it up
                self.stream.flush()
                self.stream.close()
                self.stream = None  # See Issue #21742: _open () might fail.
                # open a new file handle and get new stat info from that fd
                self.stream = self._open()
                self._statstream()
        logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.
    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.

        If port is None, host is treated as the path of a Unix domain
        socket instead of a (host, port) TCP endpoint.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        if port is None:
            self.address = host
        else:
            self.address = (host, port)
        self.sock = None
        self.closeOnError = False
        # retryTime is None until a connect fails; afterwards it holds the
        # earliest time at which another connection attempt will be made.
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0
    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is not None:
            result = socket.create_connection(self.address, timeout=timeout)
        else:
            result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            result.settimeout(timeout)
            try:
                result.connect(self.address)
            except OSError:
                result.close()  # Issue 19182
                raise
        return result
    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = True
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None # next time, no delay before trying
            except OSError:
                #Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    # Back off geometrically, capped at retryMax seconds.
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod
    def send(self, s):
        """
        Send a pickled string to the socket.
        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError: #pragma: no cover
                self.sock.close()
                self.sock = None  # so we can call createSocket next time
    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.

        The payload is the record's __dict__ (with msg pre-formatted and
        args/exc_info stripped) prefixed with a 4-byte big-endian length.
        """
        ei = record.exc_info
        if ei:
            # just to get traceback text into record.exc_text ...
            dummy = self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Pickle protocol 1 is used for compatibility with older receivers.
        s = pickle.dumps(d, 1)
        slen = struct.pack(">L", len(s))
        return slen + s
    def handleError(self, record):
        """
        Handle an error during logging.
        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)
    def emit(self, record):
        """
        Emit a record.
        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except Exception:
            self.handleError(record)
    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            if self.sock:
                self.sock.close()
                self.sock = None
            logging.Handler.close(self)
        finally:
            self.release()
class DatagramHandler(SocketHandler):
    """
    A handler that ships pickled LogRecord attribute dictionaries over a
    datagram (UDP or Unix SOCK_DGRAM) socket, so the receiving side does
    not need the sender's logging configuration.  Use makeLogRecord on
    the receiving end to rebuild each record.
    """
    def __init__(self, host, port):
        """
        Record the destination address; the socket is created lazily.
        A port of None makes *host* a Unix domain socket path.
        """
        SocketHandler.__init__(self, host, port)
        # A datagram socket has no connection that could be lost, so there
        # is never a reason to close it when an error occurs.
        self.closeOnError = False
    def makeSocket(self):
        """
        Build a SOCK_DGRAM socket instead of the stream socket the parent
        class would create.  AF_UNIX is used when no port was supplied,
        AF_INET otherwise.
        """
        if self.port is None:
            family = socket.AF_UNIX
        else:
            family = socket.AF_INET
        return socket.socket(family, socket.SOCK_DGRAM)
    def send(self, s):
        """
        Transmit the pickled bytes in a single datagram.

        Unlike the stream variant there is no partial-send handling: UDP
        guarantees neither delivery nor ordering, so one sendto call is
        all that is meaningful.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """
    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code. This
    # mapping is included in this file.
    #
    # priorities (these are ordered)
    LOG_EMERG = 0 # system is unusable
    LOG_ALERT = 1 # action must be taken immediately
    LOG_CRIT = 2 # critical conditions
    LOG_ERR = 3 # error conditions
    LOG_WARNING = 4 # warning conditions
    LOG_NOTICE = 5 # normal but significant condition
    LOG_INFO = 6 # informational
    LOG_DEBUG = 7 # debug-level messages
    # facility codes
    LOG_KERN = 0 # kernel messages
    LOG_USER = 1 # random user-level messages
    LOG_MAIL = 2 # mail system
    LOG_DAEMON = 3 # system daemons
    LOG_AUTH = 4 # security/authorization messages
    LOG_SYSLOG = 5 # messages generated internally by syslogd
    LOG_LPR = 6 # line printer subsystem
    LOG_NEWS = 7 # network news subsystem
    LOG_UUCP = 8 # UUCP subsystem
    LOG_CRON = 9 # clock daemon
    LOG_AUTHPRIV = 10 # security/authorization messages (private)
    LOG_FTP = 11 # FTP daemon
    # other codes through 15 reserved for system use
    LOG_LOCAL0 = 16 # reserved for local use
    LOG_LOCAL1 = 17 # reserved for local use
    LOG_LOCAL2 = 18 # reserved for local use
    LOG_LOCAL3 = 19 # reserved for local use
    LOG_LOCAL4 = 20 # reserved for local use
    LOG_LOCAL5 = 21 # reserved for local use
    LOG_LOCAL6 = 22 # reserved for local use
    LOG_LOCAL7 = 23 # reserved for local use
    # Maps string priority names (as accepted by encodePriority) to codes.
    priority_names = {
        "alert": LOG_ALERT,
        "crit": LOG_CRIT,
        "critical": LOG_CRIT,
        "debug": LOG_DEBUG,
        "emerg": LOG_EMERG,
        "err": LOG_ERR,
        "error": LOG_ERR, # DEPRECATED
        "info": LOG_INFO,
        "notice": LOG_NOTICE,
        "panic": LOG_EMERG, # DEPRECATED
        "warn": LOG_WARNING, # DEPRECATED
        "warning": LOG_WARNING,
    }
    # Maps string facility names (as accepted by encodePriority) to codes.
    facility_names = {
        "auth": LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron": LOG_CRON,
        "daemon": LOG_DAEMON,
        "ftp": LOG_FTP,
        "kern": LOG_KERN,
        "lpr": LOG_LPR,
        "mail": LOG_MAIL,
        "news": LOG_NEWS,
        "security": LOG_AUTH, # DEPRECATED
        "syslog": LOG_SYSLOG,
        "user": LOG_USER,
        "uucp": LOG_UUCP,
        "local0": LOG_LOCAL0,
        "local1": LOG_LOCAL1,
        "local2": LOG_LOCAL2,
        "local3": LOG_LOCAL3,
        "local4": LOG_LOCAL4,
        "local5": LOG_LOCAL5,
        "local6": LOG_LOCAL6,
        "local7": LOG_LOCAL7,
    }
    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }
    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=None):
        """
        Initialize a handler.
        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used. If socktype is
        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
        socket type will be used. For Unix sockets, you can also specify a
        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
        back to socket.SOCK_STREAM.
        """
        logging.Handler.__init__(self)
        self.address = address
        self.facility = facility
        self.socktype = socktype
        if isinstance(address, str):
            self.unixsocket = True
            self._connect_unixsocket(address)
        else:
            self.unixsocket = False
            if socktype is None:
                socktype = socket.SOCK_DGRAM
            self.socket = socket.socket(socket.AF_INET, socktype)
            if socktype == socket.SOCK_STREAM:
                self.socket.connect(address)
            self.socktype = socktype
        self.formatter = None
    def _connect_unixsocket(self, address):
        # Try the requested socket type (default SOCK_DGRAM); if the user
        # left socktype unset and the connect fails, fall back to
        # SOCK_STREAM, since local syslog daemons vary in what they accept.
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except OSError:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except OSError:
                self.socket.close()
                raise
    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, str):
            facility = self.facility_names[facility]
        if isinstance(priority, str):
            priority = self.priority_names[priority]
        # Per the syslog convention: facility in the high bits, priority
        # in the bottom three bits.
        return (facility << 3) | priority
    def close (self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            self.socket.close()
            logging.Handler.close(self)
        finally:
            self.release()
    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")
    ident = ''          # prepended to all messages
    append_nul = True   # some old syslog daemons expect a NUL terminator
    def emit(self, record):
        """
        Emit a record.
        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        msg = self.format(record)
        if self.ident:
            msg = self.ident + msg
        if self.append_nul:
            msg += '\000'
        # We need to convert record level to lowercase, maybe this will
        # change in the future.
        prio = '<%d>' % self.encodePriority(self.facility,
                                            self.mapPriority(record.levelname))
        prio = prio.encode('utf-8')
        # Message is a string. Convert to bytes as required by RFC 5424
        msg = msg.encode('utf-8')
        msg = prio + msg
        try:
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except OSError:
                    # The daemon may have restarted; reconnect and retry once.
                    self.socket.close()
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except Exception:
            self.handleError(record)
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None, timeout=5.0):
        """
        Initialize the handler.
        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, tuple):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, tuple):
            self.username, self.password = credentials
        else:
            # self.password is deliberately left unset here; emit() only
            # reads it when self.username is truthy.
            self.username = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, str):
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
        self.timeout = timeout
    def getSubject(self, record):
        """
        Determine the subject for the email.
        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject
    def emit(self, record):
        """
        Emit a record.
        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            from email.utils import formatdate
            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
            msg = self.format(record)
            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                            self.fromaddr,
                            ",".join(self.toaddrs),
                            self.getSubject(record),
                            formatdate(), msg)
            if self.username:
                if self.secure is not None:
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
            smtp.quit()
        except Exception:
            self.handleError(record)
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default to win32service.pyd, located by walking up from
                # the win32evtlogutil module's own location.
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            # pywin32 is absent: emit() degrades to a no-op (see the
            # self._welu guard there).
            print("The Python Win32 extensions for NT (service, event "\
                "logging) appear not to be available.")
            self._welu = None
    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1
    def getEventCategory(self, record):
        """
        Return the event category for the record.
        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0
    def getEventType(self, record):
        """
        Return the event type for the record.
        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)
    def emit(self, record):
        """
        Emit a record.
        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        if self._welu:
            try:
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except Exception:
                self.handleError(record)
    def close(self):
        """
        Clean up this handler.
        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST").

        host        -- web server host, optionally with ":port".
        url         -- request path, e.g. "/log".
        method      -- "GET" or "POST" (case-insensitive); anything else
                       raises ValueError.
        secure      -- use HTTPS instead of HTTP when true.
        credentials -- optional (username, password) tuple, sent as an
                       HTTP Basic Authorization header.
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials
    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__
    def emit(self, record):
        """
        Emit a record.
        Send the record to the Web server as a percent-encoded dictionary
        """
        try:
            import http.client, urllib.parse
            host = self.host
            if self.secure:
                h = http.client.HTTPSConnection(host)
            else:
                h = http.client.HTTPConnection(host)
            url = self.url
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                if (url.find('?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            h.putheader("Host", host)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
                h.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                # Bug fixes: the format string previously read 'u%s:%s' (a
                # stray literal "u" left over from a u'' prefix removal),
                # which corrupted the username; and the str 'Basic ' was
                # concatenated with the bytes from b64encode, which raises
                # TypeError - decode to ASCII first.
                s = ('%s:%s' % self.credentials).encode('utf-8')
                s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
                h.putheader('Authorization', s)
            h.endheaders()
            if self.method == "POST":
                h.send(data.encode('utf-8'))
            h.getresponse()    #can't do anything with the result
        except Exception:
            self.handleError(record)
class BufferingHandler(logging.Handler):
    """
    Collects records in an in-memory list and, after every append, asks
    shouldFlush() whether the buffer ought to be emptied.  What flushing
    actually does is left to flush(), which subclasses override.
    """
    def __init__(self, capacity):
        """
        Remember the buffer capacity and start with an empty buffer.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []
    def shouldFlush(self, record):
        """
        Return a true value once the buffer has reached capacity.
        Override to implement a different flushing policy.
        """
        return len(self.buffer) >= self.capacity
    def emit(self, record):
        """
        Buffer the record, then flush if shouldFlush() says so.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()
    def flush(self):
        """
        Discard the buffered records.  Subclasses override this to do
        something useful with them first.  Serialized via the handler lock.
        """
        self.acquire()
        try:
            self.buffer = []
        finally:
            self.release()
    def close(self):
        """
        Flush any remaining records, then close via the parent class.
        """
        self.flush()
        logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
    """
    Buffers records in memory and forwards the whole buffer to a target
    handler whenever the buffer fills up or a record at (or above) the
    flush level arrives.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Set up the buffer size, the severity that forces a flush, and an
        optional target handler.

        A MemoryHandler does nothing useful until a target is supplied,
        either here or later through setTarget().
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
    def shouldFlush(self, record):
        """
        Flush when the buffer is full, or when *record* is at or above
        the configured flush level.
        """
        if len(self.buffer) >= self.capacity:
            return True
        return record.levelno >= self.flushLevel
    def setTarget(self, target):
        """
        Install *target* as the handler that receives flushed records.
        """
        self.target = target
    def flush(self):
        """
        Hand every buffered record to the target handler and empty the
        buffer.  When no target is set, the records are deliberately
        retained so nothing is lost before setTarget() is called.
        """
        self.acquire()
        try:
            target = self.target
            if target:
                for buffered in self.buffer:
                    target.handle(buffered)
                self.buffer = []
        finally:
            self.release()
    def close(self):
        """
        Flush pending records, drop the target reference and close.
        """
        self.flush()
        self.acquire()
        try:
            self.target = None
            BufferingHandler.close(self)
        finally:
            self.release()
class QueueHandler(logging.Handler):
    """
    Forward logging events onto a queue.

    Typically used with a multiprocessing Queue so that several processes
    can centralise logging in one place and avoid contending for a single
    log file.  (Shipped with Python 3.2; copy-pasteable for older versions.)
    """
    def __init__(self, queue):
        """
        Initialise an instance, using the passed queue.
        """
        logging.Handler.__init__(self)
        self.queue = queue
    def enqueue(self, record):
        """
        Put *record* on the queue without blocking.

        Override to use blocking puts, timeouts or a custom queue API.
        """
        self.queue.put_nowait(record)
    def prepare(self, record):
        """
        Return *record* made safe for queuing (and pickling).

        Override to convert the record to a dict/JSON string, or to send
        a modified copy while leaving the original intact.
        """
        # Formatting merges the arguments into record.message (and renders
        # any traceback into record.exc_text), after which the original
        # msg/args/exc_info - all potentially unpicklable - can be dropped.
        self.format(record)
        record.msg = record.message
        record.args = None
        record.exc_info = None
        return record
    def emit(self, record):
        """
        Prepare the record for pickling, then write it to the queue.
        """
        try:
            prepared = self.prepare(record)
            self.enqueue(prepared)
        except Exception:
            self.handleError(record)
if threading:
    class QueueListener(object):
        """
        This class implements an internal threaded listener which watches for
        LogRecords being added to a queue, removes them and passes them to a
        list of handlers for processing.
        """
        # Marker placed on the queue by enqueue_sentinel(); _monitor exits
        # when it dequeues this object.
        _sentinel = None
        def __init__(self, queue, *handlers):
            """
            Initialise an instance with the specified queue and
            handlers.
            """
            self.queue = queue
            self.handlers = handlers
            self._stop = threading.Event()
            self._thread = None
        def dequeue(self, block):
            """
            Dequeue a record and return it, optionally blocking.
            The base implementation uses get. You may want to override this method
            if you want to use timeouts or work with custom queue implementations.
            """
            return self.queue.get(block)
        def start(self):
            """
            Start the listener.
            This starts up a background thread to monitor the queue for
            LogRecords to process.
            """
            self._thread = t = threading.Thread(target=self._monitor)
            # Daemonize so a forgotten listener never blocks interpreter exit.
            t.setDaemon(True)
            t.start()
        def prepare(self , record):
            """
            Prepare a record for handling.
            This method just returns the passed-in record. You may want to
            override this method if you need to do any custom marshalling or
            manipulation of the record before passing it to the handlers.
            """
            return record
        def handle(self, record):
            """
            Handle a record.
            This just loops through the handlers offering them the record
            to handle.
            """
            record = self.prepare(record)
            for handler in self.handlers:
                handler.handle(record)
        def _monitor(self):
            """
            Monitor the queue for records, and ask the handler
            to deal with them.
            This method runs on a separate, internal thread.
            The thread will terminate if it sees a sentinel object in the queue.
            """
            q = self.queue
            # task_done() is only available on JoinableQueue-style queues.
            has_task_done = hasattr(q, 'task_done')
            while not self._stop.isSet():
                try:
                    record = self.dequeue(True)
                    if record is self._sentinel:
                        break
                    self.handle(record)
                    if has_task_done:
                        q.task_done()
                except queue.Empty:
                    pass
            # There might still be records in the queue.
            # Drain without blocking; stop at the sentinel or an empty queue.
            while True:
                try:
                    record = self.dequeue(False)
                    if record is self._sentinel:
                        break
                    self.handle(record)
                    if has_task_done:
                        q.task_done()
                except queue.Empty:
                    break
        def enqueue_sentinel(self):
            """
            This is used to enqueue the sentinel record.
            The base implementation uses put_nowait. You may want to override this
            method if you want to use timeouts or work with custom queue
            implementations.
            """
            self.queue.put_nowait(self._sentinel)
        def stop(self):
            """
            Stop the listener.
            This asks the thread to terminate, and then waits for it to do so.
            Note that if you don't call this before your application exits, there
            may be some records still left on the queue, which won't be processed.
            """
            self._stop.set()
            self.enqueue_sentinel()
            self._thread.join()
            self._thread = None
| lgpl-3.0 |
levilucio/SyVOLT | UMLRT2Kiltera_MM/collapse_rules/models/strip_remove_match_applymodel_MDL.py | 6 | 9009 | """
__strip_remove_match_applymodel_MDL.py_____________________________________________________
Automatically generated AToM3 Model File (Do not modify directly)
Author: levi
Modified: Sat Jun 1 13:35:44 2013
___________________________________________________________________________________________
"""
from stickylink import *
from widthXfillXdecoration import *
from MT_pre__MatchModel import *
from MT_pre__ApplyModel import *
from MT_pre__paired_with import *
from RHS import *
from LHS import *
from graph_LHS import *
from graph_RHS import *
from graph_MT_pre__paired_with import *
from graph_MT_pre__MatchModel import *
from graph_MT_pre__ApplyModel import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Action import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def strip_remove_match_applymodel_MDL(self, rootNode, MT_pre__GM2AUTOSAR_MMRootNode=None, MoTifRuleRootNode=None):
    """Auto-generated AToM3 builder for the 'StripRemoveMatchApplyModels'
    rule: creates the MatchModel/ApplyModel pattern objects, the
    paired_with link between them, the LHS/RHS containers and (optionally)
    their graphical wrappers, all attached to rootNode.

    Machine-generated code - regenerate rather than hand-edit the wiring.
    """
    # --- Generating attributes code for ASG MT_pre__GM2AUTOSAR_MM ---
    if( MT_pre__GM2AUTOSAR_MMRootNode ):
        # author
        MT_pre__GM2AUTOSAR_MMRootNode.author.setValue('Annonymous')
        # description
        MT_pre__GM2AUTOSAR_MMRootNode.description.setValue('\n')
        MT_pre__GM2AUTOSAR_MMRootNode.description.setHeight(15)
        # name
        MT_pre__GM2AUTOSAR_MMRootNode.name.setValue('')
        MT_pre__GM2AUTOSAR_MMRootNode.name.setNone()
    # --- ASG attributes over ---
    # --- Generating attributes code for ASG MoTifRule ---
    if( MoTifRuleRootNode ):
        # author
        MoTifRuleRootNode.author.setValue('Annonymous')
        # description
        MoTifRuleRootNode.description.setValue('\n')
        MoTifRuleRootNode.description.setHeight(15)
        # name
        MoTifRuleRootNode.name.setValue('StripRemoveMatchApplyModels')
    # --- ASG attributes over ---
    # obj88: the MatchModel pattern node (label '1').
    self.obj88=MT_pre__MatchModel(self)
    self.obj88.isGraphObjectVisual = True
    if(hasattr(self.obj88, '_setHierarchicalLink')):
        self.obj88._setHierarchicalLink(False)
    # MT_label__
    self.obj88.MT_label__.setValue('1')
    # MT_pivotOut__
    self.obj88.MT_pivotOut__.setValue('')
    self.obj88.MT_pivotOut__.setNone()
    # MT_subtypeMatching__
    self.obj88.MT_subtypeMatching__.setValue(('True', 0))
    self.obj88.MT_subtypeMatching__.config = 0
    # MT_pivotIn__
    self.obj88.MT_pivotIn__.setValue('')
    self.obj88.MT_pivotIn__.setNone()
    self.obj88.graphClass_= graph_MT_pre__MatchModel
    if self.genGraphics:
        new_obj = graph_MT_pre__MatchModel(320.0,180.0,self.obj88)
        new_obj.DrawObject(self.UMLmodel)
        self.UMLmodel.addtag_withtag("MT_pre__MatchModel", new_obj.tag)
        new_obj.layConstraints = dict() # Graphical Layout Constraints
        new_obj.layConstraints['scale'] = [1.0, 1.0]
    else: new_obj = None
    self.obj88.graphObject_ = new_obj
    # Add node to the root: rootNode
    rootNode.addNode(self.obj88)
    self.globalAndLocalPostcondition(self.obj88, rootNode)
    self.obj88.postAction( rootNode.CREATE )
    # obj89: the ApplyModel pattern node (label '2').
    self.obj89=MT_pre__ApplyModel(self)
    self.obj89.isGraphObjectVisual = True
    if(hasattr(self.obj89, '_setHierarchicalLink')):
        self.obj89._setHierarchicalLink(False)
    # MT_label__
    self.obj89.MT_label__.setValue('2')
    # MT_pivotOut__
    self.obj89.MT_pivotOut__.setValue('')
    self.obj89.MT_pivotOut__.setNone()
    # MT_subtypeMatching__
    self.obj89.MT_subtypeMatching__.setValue(('True', 0))
    self.obj89.MT_subtypeMatching__.config = 0
    # MT_pivotIn__
    self.obj89.MT_pivotIn__.setValue('')
    self.obj89.MT_pivotIn__.setNone()
    self.obj89.graphClass_= graph_MT_pre__ApplyModel
    if self.genGraphics:
        new_obj = graph_MT_pre__ApplyModel(320.0,360.0,self.obj89)
        new_obj.DrawObject(self.UMLmodel)
        self.UMLmodel.addtag_withtag("MT_pre__ApplyModel", new_obj.tag)
        new_obj.layConstraints = dict() # Graphical Layout Constraints
        new_obj.layConstraints['scale'] = [1.0, 1.0]
    else: new_obj = None
    self.obj89.graphObject_ = new_obj
    # Add node to the root: rootNode
    rootNode.addNode(self.obj89)
    self.globalAndLocalPostcondition(self.obj89, rootNode)
    self.obj89.postAction( rootNode.CREATE )
    # obj90: the paired_with association (label '3') joining obj88 and obj89.
    self.obj90=MT_pre__paired_with(self)
    self.obj90.isGraphObjectVisual = True
    if(hasattr(self.obj90, '_setHierarchicalLink')):
        self.obj90._setHierarchicalLink(False)
    # MT_label__
    self.obj90.MT_label__.setValue('3')
    # MT_pivotOut__
    self.obj90.MT_pivotOut__.setValue('')
    self.obj90.MT_pivotOut__.setNone()
    # MT_subtypeMatching__
    self.obj90.MT_subtypeMatching__.setValue(('True', 0))
    self.obj90.MT_subtypeMatching__.config = 0
    # MT_pivotIn__
    self.obj90.MT_pivotIn__.setValue('')
    self.obj90.MT_pivotIn__.setNone()
    self.obj90.graphClass_= graph_MT_pre__paired_with
    if self.genGraphics:
        new_obj = graph_MT_pre__paired_with(483.5,343.5,self.obj90)
        new_obj.DrawObject(self.UMLmodel)
        self.UMLmodel.addtag_withtag("MT_pre__paired_with", new_obj.tag)
        new_obj.layConstraints = dict() # Graphical Layout Constraints
    else: new_obj = None
    self.obj90.graphObject_ = new_obj
    # Add node to the root: rootNode
    rootNode.addNode(self.obj90)
    self.globalAndLocalPostcondition(self.obj90, rootNode)
    self.obj90.postAction( rootNode.CREATE )
    # obj91: the RHS container holding the rule's post-action code.
    self.obj91=RHS(self)
    self.obj91.isGraphObjectVisual = True
    if(hasattr(self.obj91, '_setHierarchicalLink')):
        self.obj91._setHierarchicalLink(False)
    # action
    self.obj91.action.setValue('#===============================================================================\n# This code is executed after the rule has been applied.\n# You can access a node labelled n matched by this rule by: PostNode(\'n\').\n# To access attribute x of node n, use: PostNode(\'n\')[\'x\'].\n#===============================================================================\n\npass\n')
    self.obj91.action.setHeight(15)
    self.obj91.graphClass_= graph_RHS
    if self.genGraphics:
        new_obj = graph_RHS(620.0,140.0,self.obj91)
        new_obj.DrawObject(self.UMLmodel)
        self.UMLmodel.addtag_withtag("RHS", new_obj.tag)
        new_obj.layConstraints = dict() # Graphical Layout Constraints
        new_obj.layConstraints['scale'] = [1.0, 1.0]
    else: new_obj = None
    self.obj91.graphObject_ = new_obj
    # Add node to the root: rootNode
    rootNode.addNode(self.obj91)
    self.globalAndLocalPostcondition(self.obj91, rootNode)
    self.obj91.postAction( rootNode.CREATE )
    # obj87: the LHS container holding the rule's matching constraint.
    self.obj87=LHS(self)
    self.obj87.isGraphObjectVisual = True
    if(hasattr(self.obj87, '_setHierarchicalLink')):
        self.obj87._setHierarchicalLink(False)
    # constraint
    self.obj87.constraint.setValue('#===============================================================================\n# This code is executed after the nodes in the LHS have been matched.\n# You can access a matched node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# The given constraint must evaluate to a boolean expression:\n#   returning True enables the rule to be applied,\n#   returning False forbids the rule from being applied.\n#===============================================================================\n\nreturn True\n')
    self.obj87.constraint.setHeight(15)
    self.obj87.graphClass_= graph_LHS
    if self.genGraphics:
        new_obj = graph_LHS(200.0,140.0,self.obj87)
        new_obj.DrawObject(self.UMLmodel)
        self.UMLmodel.addtag_withtag("LHS", new_obj.tag)
        new_obj.layConstraints = dict() # Graphical Layout Constraints
        new_obj.layConstraints['scale'] = [1.0, 1.0]
    else: new_obj = None
    self.obj87.graphObject_ = new_obj
    # Add node to the root: rootNode
    rootNode.addNode(self.obj87)
    self.globalAndLocalPostcondition(self.obj87, rootNode)
    self.obj87.postAction( rootNode.CREATE )
    # Connections for obj88 (graphObject_: Obj1) of type MT_pre__MatchModel
    self.drawConnections(
(self.obj88,self.obj90,[479.0, 253.0, 483.5, 343.5],"true", 2) )
    # Connections for obj89 (graphObject_: Obj2) of type MT_pre__ApplyModel
    self.drawConnections(
)
    # Connections for obj90 (graphObject_: Obj3) of type MT_pre__paired_with
    self.drawConnections(
(self.obj90,self.obj89,[483.5, 343.5, 488.0, 434.0],"true", 2) )
    # Connections for obj91 (graphObject_: Obj4) of type RHS
    self.drawConnections(
)
    # Connections for obj87 (graphObject_: Obj0) of type LHS
    self.drawConnections(
)
# Module-level metadata consumed by the AToM3 loader.
newfunction = strip_remove_match_applymodel_MDL
loadedMMName = ['MT_pre__GM2AUTOSAR_MM_META', 'MoTifRule_META']
atom3version = '0.3'
| mit |
hunch/hunch-sample-app | django/contrib/localflavor/ie/ie_counties.py | 12 | 1167 | """
Sources:
Irish Counties: http://en.wikipedia.org/wiki/Counties_of_Ireland
"""
from django.utils.translation import ugettext_lazy as _
# (slug, localized name) pairs for all 32 counties of the island of Ireland
# (both the Republic and Northern Ireland), suitable for a Django form
# ChoiceField.  Keep the names as literal _('...') calls so that gettext's
# makemessages can extract them for translation.
IE_COUNTY_CHOICES = (
    ('antrim', _('Antrim')),
    ('armagh', _('Armagh')),
    ('carlow', _('Carlow')),
    ('cavan', _('Cavan')),
    ('clare', _('Clare')),
    ('cork', _('Cork')),
    ('derry', _('Derry')),
    ('donegal', _('Donegal')),
    ('down', _('Down')),
    ('dublin', _('Dublin')),
    ('fermanagh', _('Fermanagh')),
    ('galway', _('Galway')),
    ('kerry', _('Kerry')),
    ('kildare', _('Kildare')),
    ('kilkenny', _('Kilkenny')),
    ('laois', _('Laois')),
    ('leitrim', _('Leitrim')),
    ('limerick', _('Limerick')),
    ('longford', _('Longford')),
    ('louth', _('Louth')),
    ('mayo', _('Mayo')),
    ('meath', _('Meath')),
    ('monaghan', _('Monaghan')),
    ('offaly', _('Offaly')),
    ('roscommon', _('Roscommon')),
    ('sligo', _('Sligo')),
    ('tipperary', _('Tipperary')),
    ('tyrone', _('Tyrone')),
    ('waterford', _('Waterford')),
    ('westmeath', _('Westmeath')),
    ('wexford', _('Wexford')),
    ('wicklow', _('Wicklow')),
)
| mit |
StuartLittlefair/astropy | astropy/wcs/tests/test_profiling.py | 8 | 2586 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pytest
import numpy as np
from astropy.utils.data import get_pkg_data_filenames, get_pkg_data_contents
from astropy.utils.misc import NumpyRNGContext
from astropy import wcs
from astropy.wcs.wcs import FITSFixedWarning
# use the base name of the file, because everything we yield
# will show up in the test name in the pandokia report
# Collected once at import time; test_read_map_files() below guards the
# expected count so missing data files do not silently skip tests.
hdr_map_file_list = [
    os.path.basename(fname)
    for fname in get_pkg_data_filenames("data/maps", pattern="*.hdr")]
# Checking the number of files before reading them in.
# OLD COMMENTS:
# AFTER we tested with every file that we found, check to see that we
# actually have the list we expect. If N=0, we will not have performed
# any tests at all. If N < n_data_files, we are missing some files,
# so we will have skipped some tests. Without this check, both cases
# happen silently!
def test_read_map_files():
    """Guard test: the data/maps directory must hold exactly 28 headers.

    If files go missing the parametrized tests below would silently skip;
    this assertion makes that failure loud.
    """
    expected = 28
    found = len(hdr_map_file_list)
    message = (
        "test_read_map_files has wrong number data files: found {}, expected "
        " {}".format(found, expected))
    assert found == expected, message
@pytest.mark.parametrize("filename", hdr_map_file_list)
def test_map(filename):
    """Round-trip a batch of random pixel coordinates through the WCS
    built from each map header (pix->world and world->pix)."""
    header = get_pkg_data_contents(os.path.join("data/maps", filename))
    wcsobj = wcs.WCS(header)
    # Fixed seed keeps the exercised coordinates reproducible across runs.
    with NumpyRNGContext(123456789):
        x = np.random.rand(2 ** 12, wcsobj.wcs.naxis)
        wcsobj.wcs_pix2world(x, 1)
        wcsobj.wcs_world2pix(x, 1)
# Spectrum headers, collected the same way as the map headers above.
hdr_spec_file_list = [
    os.path.basename(fname)
    for fname in get_pkg_data_filenames("data/spectra", pattern="*.hdr")]
def test_read_spec_files():
    """Guard test: the data/spectra directory must hold exactly 6 headers."""
    expected = 6
    found = len(hdr_spec_file_list)
    message = (
        "test_spectra has wrong number data files: found {}, expected "
        " {}".format(found, expected))
    assert found == expected, message
    # b.t.w.  If this assert happens, pytest reports one more test
    # than it would have otherwise.
@pytest.mark.parametrize("filename", hdr_spec_file_list)
def test_spectrum(filename):
    """Round-trip random pixel coordinates through the WCS built from each
    spectrum header, tolerating FITSFixedWarning during construction."""
    header = get_pkg_data_contents(os.path.join("data", "spectra", filename))
    # Warning only pops up for one of the inputs.
    with pytest.warns(None) as warning_lines:
        wcsobj = wcs.WCS(header)
    # Anything that *was* warned must be a FITSFixedWarning; other warning
    # categories indicate a real problem with the header.
    for w in warning_lines:
        assert issubclass(w.category, FITSFixedWarning)
    with NumpyRNGContext(123456789):
        x = np.random.rand(2 ** 16, wcsobj.wcs.naxis)
        wcsobj.wcs_pix2world(x, 1)
        wcsobj.wcs_world2pix(x, 1)
| bsd-3-clause |
keithhamilton/blackmaas | lib/python2.7/encodings/rot_13.py | 497 | 2579 | #!/usr/bin/env python
""" Python Character Mapping Codec for ROT13.
See http://ucsub.colorado.edu/~kominek/rot13/ for details.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless ROT13 codec: both directions delegate to the C-level
    # charmap codec with the module-level translation tables.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # ROT13 is stateless, so each chunk can be charmap-encoded independently.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Stateless decode: chunk boundaries never split a mapping unit.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_map)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream support comes entirely from Codec.encode + codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream support comes entirely from Codec.decode + codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the codecs registry uses to look up
    the 'rot-13' encoding."""
    return codecs.CodecInfo(
        name='rot-13',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
# ROT13 rotates each ASCII letter 13 positions within its own case class;
# every other code point is left to map to itself by the identity dict.
for _case_base in (0x41, 0x61):  # ord('A'), ord('a')
    for _offset in range(26):
        decoding_map[_case_base + _offset] = _case_base + (_offset + 13) % 26
del _case_base, _offset

### Encoding Map

# ROT13 is an involution, so the encoding map is simply the inverse
# (identical) mapping derived from the decoding map.
encoding_map = codecs.make_encoding_map(decoding_map)
### Filter API
def rot13(infile, outfile):
    # Filter: read all of *infile*, ROT13 it and write it to *outfile*.
    # NOTE(review): str.encode('rot-13') only works on Python 2; Python 3
    # removed str->str codecs from str.encode - confirm the target version.
    outfile.write(infile.read().encode('rot-13'))
# Run as a script: ROT13-filter stdin to stdout.
if __name__ == '__main__':
    import sys
    rot13(sys.stdin, sys.stdout)
| bsd-3-clause |
lckung/spark-ec2 | launch-script/lib/boto-2.34.0/tests/integration/sdb/test_cert_verification.py | 126 | 1544 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.sdb
class SDBCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
    """Verify SSL certificates validate on every SimpleDB regional endpoint."""
    # Flag read by the shared ServiceCertVerificationTest mixin.
    sdb = True
    # All SimpleDB regions; the mixin exercises each endpoint.
    regions = boto.sdb.regions()
    def sample_service_call(self, conn):
        # A cheap authenticated request that forces the TLS handshake.
        conn.get_all_domains()
| apache-2.0 |
mattions/TimeScales | ecellControl/ecellManager.py | 1 | 8194 | # Author Michele Mattioni
# Fri Jan 30 15:57:01 GMT 2009
#import ecell.Session as Session
#from ecell.Session import Session
try:
from mySession import Session
import ecell.ecs
import ecell.config
import ecell.emc
except ImportError, e:
print "IMPORT ERROR: Ecell not available. Run the model the `bio_on` set as False."
import os
import numpy
from sumatra.external.NeuroTools import parameters
class EcellManager():
    """Control and instantiate the E-Cell simulator, embedding it in a
    handy python object.

    Wraps an ecell Session, loads a biochemical model and exposes helpers
    to log species, inject calcium spikes and collect time courses.
    """
    def __init__(self, filename=None):
        """Create the simulator session and load the model in *filename*."""
        ecell.ecs.setDMSearchPath( os.pathsep.join( ecell.config.dm_path ) )
        self.sim = ecell.emc.Simulator()
        # Session's constructor signature changed in E-Cell 3.2.0.
        if ecell.config.version < '3.2.0':
            self.ses = Session(self.sim, changeDirectory=False)
        else:
            self.ses = Session(self.sim)

        # Load the model
        self.ses.loadModel(filename)
        # Species whose time courses are recorded by createLoggers().
        self.molToTrack = ('ca',
                           'moles_bound_ca_per_moles_cam',
                           'Rbar',
                           'PP2Bbar',
                           'CaMKIIbar',
                           'PP1abar', # Active PP1/Total PP1
                           'AMPAR', #
                           'AMPAR_P',
                           'D',
                           'totDp',
                           'Dpbar'
                           )
        # Entity stubs used to read/write the calcium-related state on the fly.
        self.ca = self.ses.createEntityStub( 'Variable:/Spine:ca' )
        self.CaMKIIbar = self.ses.createEntityStub( 'Variable:/Spine:CaMKIIbar' )
        self.ampar_P = self.ses.createEntityStub('Variable:/Spine:AMPAR_P')
        self.ca_in = self.ses.createEntityStub('Process:/Spine:ca_in')
        self.ca_leak = self.ses.createEntityStub('Process:/Spine:ca_leak')
        self.ca_pump = self.ses.createEntityStub('Process:/Spine:ca_pump')

    def createLoggers(self):
        """Create a logger for every species in self.molToTrack (plus the
        molar calcium concentration) and store them in self.loggers."""
        loggers = {}
        #log = ecell.LoggerStub()

        for mol in self.molToTrack:
            loggers[mol]  = self.ses.createLoggerStub( "Variable:/Spine:" + mol
                                                      + ":Value" )
            loggers[mol].create() # This creat the Logger Object in the backend
            if mol == 'ca':
                loggers['ca_conc'] = self.ses.createLoggerStub( "Variable:/Spine:" + mol
                                                               + ":MolarConc" )
                loggers['ca_conc'].create() # This creat the Logger Object in the backend
        self.loggers = loggers

    @staticmethod
    def calcWeight(CaMKIIbar, PP2Bbar, alpha, beta, n=3, k=0.5):
        """Calculate the synaptic weight from the active CaMKII (kinase) and
        PP2B (phosphatase) fractions, each passed through a Hill function.

        :param CaMKIIbar: active fraction of CaMKII (potentiating term)
        :param PP2Bbar: active fraction of phosphatase PP2B (depressing term)
        :param alpha: scaling of the CaMKII contribution
        :param beta: scaling of the phosphatase contribution
        :param n: Hill coefficient
        :param k: half-activation constant of the Hill function
        :return: the synaptic weight (1 at rest)
        """
        # BUG FIX: this was an instance method without 'self' (so instance
        # calls bound CaMKIIbar to the object) and the module never imported
        # math; it is now a staticmethod with a local import.
        import math
        CaMKII_factor = math.pow(CaMKIIbar, n) / (math.pow(k, n) +
                                                  math.pow(CaMKIIbar, n))
        Phosphatase_factor = math.pow(PP2Bbar, n) / (math.pow(k, n) +
                                                     math.pow(PP2Bbar, n))
        scaled_CaMKII_factor = alpha * CaMKII_factor
        scaled_Phospatese_factor = beta * Phosphatase_factor
        weight = 1 + scaled_CaMKII_factor - scaled_Phospatese_factor
        return weight

    def calcium_peak(self, k_value, duration):
        """
        Mimic the calcium peak

        :Parameters
            k_value: the rate of calcium to enter
            duration: Duration of the spike
        """
        # Temporarily raise the calcium influx rate, run, then restore it.
        basal = self.ca_in['k']
        self.ca_in['k'] = k_value
        self.ses.run(duration)
        self.ca_in['k'] = basal

    def calciumTrain(self, spikes=30, interval=0.1):
        """Create a train of calcium with the specified number of spikes and interval

        :Parameter
            spikes: number of spikes
            interval: Interval between spikes
        """
        for i in range(spikes):
            self.calcium_peak(4.0e8, # Magic number from Lu
                              0.00001 #Really fast spike to avoid the overlap
                              )
            self.ses.run(interval)

    def converToTimeCourses(self):
        """Pull the recorded data out of every logger into the
        self.timeCourses dict (one entry per logged species)."""
        timeCourses = {}
        for key in self.loggers:
            timeCourses[key] = self.loggers[key].getData()
        self.timeCourses = timeCourses
##############################################
# Testing method
def testCalciumTrain(spikes_number, interval, filename):
    """Run a test simulation with a train of calcium input.

    :param spikes_number: number of calcium spikes in the train
    :param interval: interval between spikes [s]
    :param filename: path to the .eml model to load
    :return: the EcellManager with timeCourses populated
    """
    # NOTE(review): the trailing "" on the next line concatenates an empty
    # string onto the message (Python 2 print statement) - harmless but odd.
    print "Test the results of a train of calcium"""
    ecellManager = EcellManager(filename)
    ecellManager.createLoggers()
    #ecellManager.ca_in = ecellManager.ses.createEntityStub('Process:/Spine:ca_in')
    print "Model loaded, loggers created. Integration start."
    # Settle the model before stimulating, then record the relaxation after.
    ecellManager.ses.run(300)
    print "Calcium Train"
    ecellManager.calciumTrain(spikes=spikes_number, interval=interval)
    ecellManager.ses.run(400)
    ecellManager.converToTimeCourses()
    print "CalciumTrain Test Concluded\n##################"
    return ecellManager
def testChangeCalciumValue(interval, caValue, filename="../biochemical_circuits/biomd183_noCalcium.eml"):
    """Run a test simulation changing the calcium value on the fly.

    Holds calcium at *caValue* (rewritten every *interval* seconds), fires
    4 brief spikes to 7200, then holds again for 500 s of simulated time.
    Returns the EcellManager with timeCourses populated.
    """
    print "Show case of the possibilities to change the level of calcium on the fly"
    ecellManager = EcellManager(filename)
    ecellManager.createLoggers()
    print "Loggers created"
    print "Running with the updating interval of : %f" %interval
    tstop = 150
    # Clamp calcium at the basal value until tstop.
    while(ecellManager.ses.getCurrentTime() < tstop):
        ecellManager.ca['Value'] = caValue
        ecellManager.ses.run(interval)
        #ecellManager.ses.run(1)
        #print ecellManager.ses.getCurrentTime()
    print "immision of Calcium"
    print "Value of Calcium %f" %ecellManager.ca.getProperty('Value')
    # Four 20 ms spikes to 7200, each followed by 10 ms back at basal level.
    spikes = 4
    for i in range(spikes):
        ecellManager.ca['Value'] = 7200
        ecellManager.ses.run(0.020)
        ecellManager.ca['Value'] = caValue
        ecellManager.ses.run(0.010)
    tstop = tstop+500
    # Clamp at basal again while the downstream cascade relaxes.
    while(ecellManager.ses.getCurrentTime() < tstop):
        ecellManager.ca['Value'] = caValue
        ecellManager.ses.run(interval)
        #ecellManager.ses.run(1)
        #print ecellManager.ses.getCurrentTime()
    ecellManager.converToTimeCourses()
    print "ChangeCalciumValue Test Concluded"
    return ecellManager
# Script entry point: load a parameter file, run the requested simulation
# and either save plots to a new directory (batch) or show them (interactive).
if __name__ == "__main__":
    import sys
    if len(sys.argv) != 2:
        print("No parameter file supplied. Abort.")
        usage = 'python ecellManager.py ecellControl.param'
        print usage
        sys.exit()
    parameter_file = sys.argv[1]
    param = parameters.ParameterSet(parameter_file)

    ## Setting the mat plotlib backend
    import matplotlib
    # Batch runs must pick the non-interactive backend before pyplot loads.
    if param['interactive'] == False:
        matplotlib.use('Agg')
        print "Switching backend to Agg. Batch execution"
    import matplotlib.pyplot as plt
    from helpers.plotter import EcellPlotter
    import helpers
    loader = helpers.Loader()

#    ecellManager = testChangeCalciumValue(interval, caValue)
    if param['running_type'] == 'train':
        ecellManager = testCalciumTrain(param['num_spikes'],
                                        param['delay'],
                                        param['biochemical_filename'])

    ecp = EcellPlotter()
    if param['interactive'] == False:
        dir = loader.create_new_dir(prefix=os.getcwd())
        loader.save(ecellManager.timeCourses, dir, "timeCourses")
        ecp.plot_timeCourses(ecellManager.timeCourses, save=True, dir=dir)
        ecp.plot_weight(ecellManager.timeCourses, dir=dir)
    else:
        ecp.plot_timeCourses(ecellManager.timeCourses)
        ecp.plot_weight(ecellManager.timeCourses)
        plt.show()
| bsd-3-clause |
hikaruAi/HPanda | HCameras.py | 3 | 3295 | from panda3d.core import Vec3,Point3,NodePath
class BaseCamera():
    """Base class for camera controllers: disables the default Panda3D
    mouse-driven camera and registers execute() as a recurring task."""
    def __init__(self,scene):
        self.scene=scene
        # Take manual control of the camera away from the default trackball.
        base.disableMouse()
        self.scene.Base.taskMgr.add(self.execute,"CameraAI",priority=128)
    def execute(self,t):
        # Per-frame camera update; subclasses override this.
        pass
class ThirdPersonCamera(BaseCamera):
    """Camera that chases a target node, easing towards a position marker
    parented to the target while always looking at a look-at marker held
    *lookAtHeight* above it."""
    def __init__(self,scene,target,lookAtHeight,initX=0,initY=0,initZ=0,speed=1,debug=False):
        BaseCamera.__init__(self,scene)
        self.target=target
        # Marker the camera keeps aimed at, riding lookAtHeight above target.
        self.lookAtTarget=self.scene.loadEgg("HPanda/axis")
        self.lookAtTarget.setScale(1)
        self.lookAtTarget.setZ(self.target,lookAtHeight)
        self.lookAtTarget.reparentTo(target)
        #self.lookAtTarget.setScale(1)
        self.scene.camera.setPos(self.scene.render,initX,initY,initZ)
        self.scene.camera.lookAt(self.lookAtTarget)
        # Marker that records the desired camera position, parented to the
        # target so it follows the target around the scene.
        self.positionTarget=self.scene.loadEgg("HPanda/axis")
        self.positionTarget.setPos(self.scene.render,initX,initY,initZ)
        self.positionTarget.reparentTo(self.target)
        self.speed=speed
        # The axis markers are only shown when debugging the camera rig.
        if debug is False:
            self.lookAtTarget.hide()
            self.positionTarget.hide()
    def execute(self,t):
        # Offset from the camera to its desired position, in camera space.
        v = self.positionTarget.getPos( self.scene.camera )
        if v.almostEqual(Vec3(0,0,0),0.01):
            return t.cont
        else:
            # Ease towards the marker, frame-rate independent via getDt().
            self.scene.camera.setPos( self.scene.camera , v*globalClock.getDt()*self.speed )
            self.scene.camera.lookAt( self.lookAtTarget )
            return t.cont
class HMouseLook():
def __init__(self, showBase, xFact=500, yFact=500, applyH=True, applyP=True):
self.Base = showBase
self.xFact = xFact
self.yFact = yFact
self.centerMouse()
self.setTask()
self.centerX = base.win.getXSize() / 2
self.centerY = base.win.getXSize() / 2
self.dH = 0
self.dP = 0
self.applyH = applyH
self.applyP = applyP
self.hideMouse()
def hideMouse(self, value=True):
prop = WindowProperties()
prop.setCursorHidden(value)
base.win.requestProperties(prop)
def disable(self):
self.Base.removeTask("updateTask")
def enable(self):
self.setTask()
def setTask(self):
# if base.mouseWatcherNode.hasMouse():
self.Base.addTask(self.updateTask, "updateTask")
print "Task Added"
def updateTask(self, task):
try:
x = base.mouseWatcherNode.getMouseX()
y = base.mouseWatcherNode.getMouseY()
except:
self.centerMouse()
return task.cont
if x == 0:
self.dH = 0
if y == 0:
self.dP = 0
self.centerMouse()
if x == 0 and y == 0:
return task.cont
else:
dt = globalClock.getDt()
self.dH = -x * self.xFact * dt
self.dP = y * self.xFact * dt
if self.applyH:
self.Base.camera.setH(self.Base.camera, self.dH)
if self.applyP:
self.Base.camera.setP(self.Base.camera, self.dP)
self.Base.camera.setR(self.Base.render, 0)
self.centerMouse()
return task.cont
def centerMouse(self):
base.win.movePointer(0, base.win.getXSize() / 2, base.win.getYSize() / 2) | bsd-2-clause |
anakinsolo/backend | Lib/site-packages/setuptools/extension.py | 229 | 1649 | import sys
import re
import functools
import distutils.core
import distutils.errors
import distutils.extension
from .dist import _get_unpatched
from . import msvc9_support
_Extension = _get_unpatched(distutils.core.Extension)
msvc9_support.patch_for_specialized_compiler()
def _have_cython():
"""
Return True if Cython can be imported.
"""
cython_impl = 'Cython.Distutils.build_ext',
try:
# from (cython_impl) import build_ext
__import__(cython_impl, fromlist=['build_ext']).build_ext
return True
except Exception:
pass
return False
# for compatibility
have_pyrex = _have_cython
class Extension(_Extension):
    """Extension that uses '.c' files in place of '.pyx' files"""

    def _convert_pyx_sources_to_lang(self):
        """
        Swap the '.pyx' suffix on each source for the target language's
        extension, so pre-converted sources are used when Cython is not
        available to compile the .pyx originals.
        """
        if _have_cython():
            # Cython is present and can consume the .pyx files directly.
            return
        lang = (self.language or '').lower()
        target_ext = '.cpp' if lang == 'c++' else '.c'
        self.sources = [re.sub('.pyx$', target_ext, src) for src in self.sources]
self.sources = list(map(sub, self.sources))
class Library(Extension):
    """Just like a regular Extension, but built as a library instead.

    The distinct type lets build commands recognise library targets; no
    extra behaviour is added here.
    """
# Monkey-patch distutils so every place that resolves Extension - including
# a build_ext module that was imported before this one - picks up the
# setuptools subclass defined above.
distutils.core.Extension = Extension
distutils.extension.Extension = Extension
if 'distutils.command.build_ext' in sys.modules:
    sys.modules['distutils.command.build_ext'].Extension = Extension
rs2/pandas | pandas/tests/arithmetic/conftest.py | 2 | 5981 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
# ------------------------------------------------------------------
# Helper Functions
def id_func(x):
    """
    Build a pytest parameter id: the bare ``__name__`` for a class or
    function, or ``"<name>-<value>"`` for a 2-tuple of (class_or_func, value).
    """
    if not isinstance(x, tuple):
        return x.__name__
    assert len(x) == 2
    return f"{x[0].__name__}-{x[1]}"
# ------------------------------------------------------------------
@pytest.fixture(
    params=[
        # unnamed second operand -> result keeps no name
        ("foo", None, None),
        # mismatched operand names -> result name is dropped
        ("Egon", "Venkman", None),
        # matching operand names propagate to the result
        ("NCC1701D", "NCC1701D", "NCC1701D"),
    ]
)
def names(request):
    """
    A 3-tuple of names, the first two for operands, the last for a result.
    """
    return request.param
# Both the plain int and the zero-dim array must behave identically in
# datetimelike arithmetic, hence the two params.
@pytest.fixture(params=[1, np.array(1, dtype=np.int64)])
def one(request):
    """
    Several variants of integer value 1. The zero-dim integer array
    behaves like an integer.

    This fixture can be used to check that datetimelike indexes handle
    addition and subtraction of integers and zero-dimensional arrays
    of integers.

    Examples
    --------
    >>> dti = pd.date_range('2016-01-01', periods=2, freq='H')
    >>> dti
    DatetimeIndex(['2016-01-01 00:00:00', '2016-01-01 01:00:00'],
    dtype='datetime64[ns]', freq='H')
    >>> dti + one
    DatetimeIndex(['2016-01-01 01:00:00', '2016-01-01 02:00:00'],
    dtype='datetime64[ns]', freq='H')
    """
    return request.param
# Every flavour of "zero" the division tests must cope with: length-5
# vectors (Index and ndarray) in each numeric dtype, negative-zero float
# vectors, zero-dimensional arrays, and plain python scalars.
zeros = []
for box_cls in (pd.Index, np.array):
    for dtype in (np.int64, np.uint64, np.float64):
        zeros.append(box_cls([0] * 5, dtype=dtype))
for box_cls in (pd.Index, np.array):
    zeros.append(box_cls([-0.0] * 5, dtype=np.float64))
for dtype in (np.int64, np.uint64, np.float64):
    zeros.append(np.array(0, dtype=dtype))
zeros.append(np.array(-0.0, dtype=np.float64))
zeros.extend([0, 0.0, -0.0])
# Parametrized over the module-level ``zeros`` list built above.
@pytest.fixture(params=zeros)
def zero(request):
    """
    Several types of scalar zeros and length 5 vectors of zeros.

    This fixture can be used to check that numeric-dtype indexes handle
    division by any zero numeric-dtype.

    Uses vector of length 5 for broadcasting with `numeric_idx` fixture,
    which creates numeric-dtype vectors also of length 5.

    Examples
    --------
    >>> arr = pd.RangeIndex(5)
    >>> arr / zeros
    Float64Index([nan, inf, inf, inf, inf], dtype='float64')
    """
    return request.param
# ------------------------------------------------------------------
# Vector Fixtures
@pytest.fixture(
    params=[
        pd.Float64Index(np.arange(5, dtype="float64")),
        pd.Int64Index(np.arange(5, dtype="int64")),
        pd.UInt64Index(np.arange(5, dtype="uint64")),
        pd.RangeIndex(5),
    ],
    ids=lambda x: type(x).__name__,
)
def numeric_idx(request):
    """
    Several types of numeric-dtypes Index objects, each of length 5
    (matching the vectors produced by the ``zero`` fixture).
    """
    return request.param
# ------------------------------------------------------------------
# Scalar Fixtures
@pytest.fixture(
    params=[
        # datetime.timedelta, pd.Timedelta, and np.timedelta64 variants
        pd.Timedelta("5m4s").to_pytimedelta(),
        pd.Timedelta("5m4s"),
        pd.Timedelta("5m4s").to_timedelta64(),
    ],
    ids=lambda x: type(x).__name__,
)
def scalar_td(request):
    """
    Several variants of Timedelta scalars representing 5 minutes and 4 seconds
    """
    return request.param
@pytest.fixture(
    params=[
        pd.offsets.Day(3),
        pd.offsets.Hour(72),
        pd.Timedelta(days=3).to_pytimedelta(),
        pd.Timedelta("72:00:00"),
        np.timedelta64(3, "D"),
        np.timedelta64(72, "h"),
    ],
    ids=lambda x: type(x).__name__,
)
def three_days(request):
    """
    Several timedelta-like and DateOffset objects that each represent
    a 3-day timedelta
    """
    return request.param
@pytest.fixture(
    params=[
        pd.offsets.Hour(2),
        pd.offsets.Minute(120),
        pd.Timedelta(hours=2).to_pytimedelta(),
        pd.Timedelta(seconds=2 * 3600),
        np.timedelta64(2, "h"),
        np.timedelta64(120, "m"),
    ],
    ids=lambda x: type(x).__name__,
)
def two_hours(request):
    """
    Several timedelta-like and DateOffset objects that each represent
    a 2-hour timedelta
    """
    return request.param
# Offsets that are incompatible with every fixed frequency exercised
# below; shared by the not_hourly/not_daily/mismatched_freq fixtures.
_common_mismatch = [
    pd.offsets.YearBegin(2),
    pd.offsets.MonthBegin(1),
    pd.offsets.Minute(),
]
@pytest.fixture(
    params=[
        pd.Timedelta(minutes=30).to_pytimedelta(),
        np.timedelta64(30, "s"),
        pd.Timedelta(seconds=30),
    ]
    + _common_mismatch
)
def not_hourly(request):
    """
    Several timedelta-like and DateOffset instances that are _not_
    compatible with Hourly frequencies.
    """
    return request.param
@pytest.fixture(
    params=[
        np.timedelta64(4, "h"),
        pd.Timedelta(hours=23).to_pytimedelta(),
        pd.Timedelta("23:00:00"),
    ]
    + _common_mismatch
)
def not_daily(request):
    """
    Several timedelta-like and DateOffset instances that are _not_
    compatible with Daily frequencies.
    """
    return request.param
@pytest.fixture(
    params=[
        np.timedelta64(365, "D"),
        pd.Timedelta(days=365).to_pytimedelta(),
        pd.Timedelta(days=365),
    ]
    + _common_mismatch
)
def mismatched_freq(request):
    """
    Several timedelta-like and DateOffset instances that are _not_
    compatible with Monthly or Annual frequencies.
    """
    return request.param
# ------------------------------------------------------------------
@pytest.fixture(params=[pd.Index, pd.Series, pd.DataFrame], ids=id_func)
def box(request):
    """
    Several array-like containers that should have effectively identical
    behavior with respect to arithmetic operations.
    """
    return request.param
@pytest.fixture(params=[pd.Index, pd.Series, pd.DataFrame, tm.to_array], ids=id_func)
def box_with_array(request):
    """
    Fixture to test behavior for Index, Series, DataFrame, and pandas Array
    classes
    """
    return request.param
# Alias so a single test can request this fixture under two parameter
# names and get independently-parametrized boxes.
box_with_array2 = box_with_array
| bsd-3-clause |
iseppi/zookeepr | zkpylons/controllers/checkin.py | 5 | 8387 | import logging
import datetime
from pylons import request, response, session, tmpl_context as c
from zkpylons.lib.helpers import redirect_to
from pylons.decorators import validate
from pylons.decorators.rest import dispatch_on
from pylons.decorators import jsonify
from formencode import validators, htmlfill, ForEach, Invalid
from formencode.variabledecode import NestedVariables
from zkpylons.lib.base import BaseController, render
from zkpylons.lib.validators import BaseSchema, ProductValidator
import zkpylons.lib.helpers as h
import sqlalchemy as sa
from sqlalchemy.sql.expression import cast
from authkit.authorize.pylons_adaptors import authorize
from authkit.permissions import ValidAuthKitUser
from zkpylons.lib.mail import email
from zkpylons.model import meta, Person, FulfilmentGroup, Fulfilment, FulfilmentItem, Proposal
log = logging.getLogger(__name__)
class CheckinController(BaseController):
    """JSON endpoints for looking up attendees and viewing/updating their
    fulfilment records at check-in, plus a printable bio list."""
    # Every action requires either the organiser role or the dedicated
    # 'checkin' role; enforced once here rather than per-action.
    @authorize(h.auth.Or(h.auth.has_organiser_role, h.auth.HasZookeeprRole('checkin')))
    def __before__(self, **kwargs):
        pass
    @jsonify
    def lookup(self):
        # Type-ahead search: match the query prefix against names, email
        # addresses, person ids, boarding (group) codes and badge codes,
        # returning at most 5 (person_id, pretty-label) rows.
        q = request.params['q']
        # Assume that the SQL library handles the SQL attack vectors
        person_query = meta.Session.query(Person.id, sa.func.concat(Person.fullname, " - ", Person.email_address).label("pretty")).filter(sa.or_(
            Person.lastname.ilike(q + '%'),
            Person.fullname.ilike(q + '%'),
            Person.email_address.ilike(q + '%'),
            Person.email_address.ilike('%@' + q + '%'),
        ))
        personid_query = meta.Session.query(Person.id, sa.func.concat(cast(Person.id, sa.String), " - ", Person.fullname).label("pretty")).filter(
            cast(Person.id, sa.String).like(q + '%'),
        )
        boarding_query = meta.Session.query(Person.id, sa.func.concat(FulfilmentGroup.code, " - ", Person.fullname).label("pretty")).join(FulfilmentGroup).filter(
            FulfilmentGroup.code.ilike(q + '%')
        )
        badge_query = meta.Session.query(Person.id, sa.func.concat(Fulfilment.code, " - ", Person.fullname).label("pretty")).join(Fulfilment).filter(
            Fulfilment.code.ilike(q + '%')
        )
        union_query = person_query.union(personid_query, boarding_query, badge_query).order_by("pretty").limit(5)
        return dict(r=list(union_query.all()))
    @jsonify
    def person_data(self):
        """
        Return the fulfilment data for a person in json format.
        Use direct sql queries because I am too lazy to work out how James' magic queries work.
        """
        id = request.params['id']
        # int() both validates the parameter and makes the %d
        # interpolations below safe from SQL injection.
        id = int(id)
        # Assume that the SQL library sanitizes this somehow somewhere
        person_qry_text = """select p.*
                     from person p
                     where p.id = %d
                  """ % id
        notes_qry = """select rn.*
                   from rego_note rn
                   join registration r on rn.rego_id = r.id
                   where r.person_id = %d
                """ % id
        fulfilments_qry = """select f.*, fs.name fulfilment_status, ft.name fulfilment_type
                     from fulfilment f,
                          fulfilment_status fs ,
                          fulfilment_type ft
                     where person_id = %d
                     and fs.id = f.status_id
                     and ft.id = f.type_id
                  """ % id
        # %d placeholder filled in per-fulfilment inside the loop below.
        fulfilment_items_qry = """
                select fi.*, CONCAT(c.name, ' - ', p.description) AS description
                from fulfilment_item fi,
                     product p,
                     product_category c
                where fulfilment_id = %d
                and fi.product_id = p.id
                and p.category_id = c.id
                """
        person_qry = sa.sql.text(person_qry_text)
        row = meta.Session.execute(person_qry).fetchone()
        person = {}
        person.update(row)
        # datetimes are not JSON serialisable; render them as dd/mm/yyyy.
        for k in person.keys():
            if isinstance(person[k], datetime.datetime):
                person[k] = person[k].strftime('%d/%m/%Y')
        notes = []
        for n_row in meta.Session.execute(notes_qry):
            note = {}
            note.update(n_row)
            for k in note.keys():
                if isinstance(note[k], datetime.datetime):
                    note[k] = note[k].strftime('%d/%m/%Y')
            notes.append(note)
        person['notes'] = notes
        fulfilments = []
        for f_row in meta.Session.execute(fulfilments_qry).fetchall():
            fulfilment = {}
            fulfilment.update(f_row)
            for k in fulfilment.keys():
                if isinstance(fulfilment[k], datetime.datetime):
                    fulfilment[k] = fulfilment[k].strftime('%d/%m/%Y')
            fulfilment_items = []
            for fi_row in meta.Session.execute(fulfilment_items_qry % fulfilment['id']).fetchall():
                fulfilment_item = {}
                fulfilment_item.update(fi_row)
                for k in fulfilment_item.keys():
                    if isinstance(fulfilment_item[k], datetime.datetime):
                        fulfilment_item[k] = fulfilment_item[k].strftime('%d/%m/%Y')
                fulfilment_items.append(fulfilment_item)
            fulfilment['fulfilment_items'] = fulfilment_items
            fulfilments.append(fulfilment)
        person['fulfilments'] = fulfilments
        return person
    #@jsonify
    def update_fulfilments(self):
        """
        Allow the updating of fulfilment data via json.
        Only allow a subset of the columns in the tables to be updated. In particular do
        not allow the primary keys or the fulfilment_id on the fulfilmment_item table to be changed.
        TODO:
        If we don't get all of the fulfilment_items for a fulfilment then throw an error
        If qty for an item is zero then we should delete it.
        """
        import json
        debug = ""
        data = request.params['data']
        data = json.loads(data)
        for fulfilment in data['fulfilments']:
            db_fulfilment = Fulfilment.find_by_id(int(fulfilment['id']), abort_404=False)
            db_fulfilment.type_id = fulfilment['type_id']
            db_fulfilment.status_id = fulfilment['status_id']
            db_fulfilment.code = fulfilment['code']
            meta.Session.add(db_fulfilment)
            for fulfilment_item in fulfilment['fulfilment_items']:
                db_fulfilment_item = FulfilmentItem.find_by_id(fulfilment_item['id'], abort_404=False)
                db_fulfilment_item.product_id = fulfilment_item['product_id']
                db_fulfilment_item.product_text = fulfilment_item['product_text']
                db_fulfilment_item.qty = int(fulfilment_item['qty'])
                meta.Session.add(db_fulfilment_item)
        meta.Session.commit()
        debug += "Committed changes\n"
        return debug
        # NOTE(review): the raise below is unreachable (after return).
        raise Exception( 'Success')
    @jsonify
    def get_talk(self):
        # NOTE(review): `if 1:` always takes the first branch; the else
        # branch below is dead code kept around for reference.
        if 1:
            id = request.params['id']
            # int() guards the %d interpolation below.
            id = int(id)
            bio_qry = """
                SELECT string_agg(concat('<span class="name">', person.firstname, ' ', person.lastname, E'</span>\n', '<p class="bio">', person.bio, '</p>'), E'\n') AS bio
                FROM event
                LEFT JOIN proposal ON (event.proposal_id = proposal.id)
                INNER JOIN person_proposal_map USING (proposal_id)
                INNER JOIN person ON (person.id = person_id)
                WHERE event.id = %d
                GROUP BY event.id
                """ % id
            bio = meta.Session.execute(bio_qry).fetchone()
            return bio['bio']
        else:
            id = request.params['id']
            id = int(id)
            talk = meta.Session.execute('SELECT * from proposal where id = %d' %id).fetchone()
            talk_dict = {}
            talk_dict.update(talk)
            del talk_dict['creation_timestamp']
            del talk_dict['last_modification_timestamp']
            return talk_dict
    def bio_list(self):
        # Plain HTML page (not JSON) listing speaker bios.
        return render('/checkin/bio_list.mako')
| gpl-2.0 |
MattDevo/edk2 | AppPkg/Applications/Python/Python-2.7.10/Lib/encodings/cp874.py | 93 | 13158 | """ Python Character Mapping Codec cp874 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP874.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: both directions are plain charmap translations
    # driven by the tables defined at the bottom of this module.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so `final` can be ignored.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding is stateless, so `final` can be ignored.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
# Stream classes just combine the stateless Codec with the stream mixins.
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    # Entry point used by the encodings package search function to
    # register this codec under the name 'cp874'.
    return codecs.CodecInfo(
        name='cp874',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
# 256-entry table: index = byte value, value = the Unicode character it
# decodes to.  u'\ufffe' marks byte values undefined in cp874; the
# implicit concatenation of adjacent literals yields one 256-char string.
decoding_table = (
    u'\x00' # 0x00 -> NULL
    u'\x01' # 0x01 -> START OF HEADING
    u'\x02' # 0x02 -> START OF TEXT
    u'\x03' # 0x03 -> END OF TEXT
    u'\x04' # 0x04 -> END OF TRANSMISSION
    u'\x05' # 0x05 -> ENQUIRY
    u'\x06' # 0x06 -> ACKNOWLEDGE
    u'\x07' # 0x07 -> BELL
    u'\x08' # 0x08 -> BACKSPACE
    u'\t' # 0x09 -> HORIZONTAL TABULATION
    u'\n' # 0x0A -> LINE FEED
    u'\x0b' # 0x0B -> VERTICAL TABULATION
    u'\x0c' # 0x0C -> FORM FEED
    u'\r' # 0x0D -> CARRIAGE RETURN
    u'\x0e' # 0x0E -> SHIFT OUT
    u'\x0f' # 0x0F -> SHIFT IN
    u'\x10' # 0x10 -> DATA LINK ESCAPE
    u'\x11' # 0x11 -> DEVICE CONTROL ONE
    u'\x12' # 0x12 -> DEVICE CONTROL TWO
    u'\x13' # 0x13 -> DEVICE CONTROL THREE
    u'\x14' # 0x14 -> DEVICE CONTROL FOUR
    u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
    u'\x16' # 0x16 -> SYNCHRONOUS IDLE
    u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
    u'\x18' # 0x18 -> CANCEL
    u'\x19' # 0x19 -> END OF MEDIUM
    u'\x1a' # 0x1A -> SUBSTITUTE
    u'\x1b' # 0x1B -> ESCAPE
    u'\x1c' # 0x1C -> FILE SEPARATOR
    u'\x1d' # 0x1D -> GROUP SEPARATOR
    u'\x1e' # 0x1E -> RECORD SEPARATOR
    u'\x1f' # 0x1F -> UNIT SEPARATOR
    u' ' # 0x20 -> SPACE
    u'!' # 0x21 -> EXCLAMATION MARK
    u'"' # 0x22 -> QUOTATION MARK
    u'#' # 0x23 -> NUMBER SIGN
    u'$' # 0x24 -> DOLLAR SIGN
    u'%' # 0x25 -> PERCENT SIGN
    u'&' # 0x26 -> AMPERSAND
    u"'" # 0x27 -> APOSTROPHE
    u'(' # 0x28 -> LEFT PARENTHESIS
    u')' # 0x29 -> RIGHT PARENTHESIS
    u'*' # 0x2A -> ASTERISK
    u'+' # 0x2B -> PLUS SIGN
    u',' # 0x2C -> COMMA
    u'-' # 0x2D -> HYPHEN-MINUS
    u'.' # 0x2E -> FULL STOP
    u'/' # 0x2F -> SOLIDUS
    u'0' # 0x30 -> DIGIT ZERO
    u'1' # 0x31 -> DIGIT ONE
    u'2' # 0x32 -> DIGIT TWO
    u'3' # 0x33 -> DIGIT THREE
    u'4' # 0x34 -> DIGIT FOUR
    u'5' # 0x35 -> DIGIT FIVE
    u'6' # 0x36 -> DIGIT SIX
    u'7' # 0x37 -> DIGIT SEVEN
    u'8' # 0x38 -> DIGIT EIGHT
    u'9' # 0x39 -> DIGIT NINE
    u':' # 0x3A -> COLON
    u';' # 0x3B -> SEMICOLON
    u'<' # 0x3C -> LESS-THAN SIGN
    u'=' # 0x3D -> EQUALS SIGN
    u'>' # 0x3E -> GREATER-THAN SIGN
    u'?' # 0x3F -> QUESTION MARK
    u'@' # 0x40 -> COMMERCIAL AT
    u'A' # 0x41 -> LATIN CAPITAL LETTER A
    u'B' # 0x42 -> LATIN CAPITAL LETTER B
    u'C' # 0x43 -> LATIN CAPITAL LETTER C
    u'D' # 0x44 -> LATIN CAPITAL LETTER D
    u'E' # 0x45 -> LATIN CAPITAL LETTER E
    u'F' # 0x46 -> LATIN CAPITAL LETTER F
    u'G' # 0x47 -> LATIN CAPITAL LETTER G
    u'H' # 0x48 -> LATIN CAPITAL LETTER H
    u'I' # 0x49 -> LATIN CAPITAL LETTER I
    u'J' # 0x4A -> LATIN CAPITAL LETTER J
    u'K' # 0x4B -> LATIN CAPITAL LETTER K
    u'L' # 0x4C -> LATIN CAPITAL LETTER L
    u'M' # 0x4D -> LATIN CAPITAL LETTER M
    u'N' # 0x4E -> LATIN CAPITAL LETTER N
    u'O' # 0x4F -> LATIN CAPITAL LETTER O
    u'P' # 0x50 -> LATIN CAPITAL LETTER P
    u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
    u'R' # 0x52 -> LATIN CAPITAL LETTER R
    u'S' # 0x53 -> LATIN CAPITAL LETTER S
    u'T' # 0x54 -> LATIN CAPITAL LETTER T
    u'U' # 0x55 -> LATIN CAPITAL LETTER U
    u'V' # 0x56 -> LATIN CAPITAL LETTER V
    u'W' # 0x57 -> LATIN CAPITAL LETTER W
    u'X' # 0x58 -> LATIN CAPITAL LETTER X
    u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
    u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
    u'[' # 0x5B -> LEFT SQUARE BRACKET
    u'\\' # 0x5C -> REVERSE SOLIDUS
    u']' # 0x5D -> RIGHT SQUARE BRACKET
    u'^' # 0x5E -> CIRCUMFLEX ACCENT
    u'_' # 0x5F -> LOW LINE
    u'`' # 0x60 -> GRAVE ACCENT
    u'a' # 0x61 -> LATIN SMALL LETTER A
    u'b' # 0x62 -> LATIN SMALL LETTER B
    u'c' # 0x63 -> LATIN SMALL LETTER C
    u'd' # 0x64 -> LATIN SMALL LETTER D
    u'e' # 0x65 -> LATIN SMALL LETTER E
    u'f' # 0x66 -> LATIN SMALL LETTER F
    u'g' # 0x67 -> LATIN SMALL LETTER G
    u'h' # 0x68 -> LATIN SMALL LETTER H
    u'i' # 0x69 -> LATIN SMALL LETTER I
    u'j' # 0x6A -> LATIN SMALL LETTER J
    u'k' # 0x6B -> LATIN SMALL LETTER K
    u'l' # 0x6C -> LATIN SMALL LETTER L
    u'm' # 0x6D -> LATIN SMALL LETTER M
    u'n' # 0x6E -> LATIN SMALL LETTER N
    u'o' # 0x6F -> LATIN SMALL LETTER O
    u'p' # 0x70 -> LATIN SMALL LETTER P
    u'q' # 0x71 -> LATIN SMALL LETTER Q
    u'r' # 0x72 -> LATIN SMALL LETTER R
    u's' # 0x73 -> LATIN SMALL LETTER S
    u't' # 0x74 -> LATIN SMALL LETTER T
    u'u' # 0x75 -> LATIN SMALL LETTER U
    u'v' # 0x76 -> LATIN SMALL LETTER V
    u'w' # 0x77 -> LATIN SMALL LETTER W
    u'x' # 0x78 -> LATIN SMALL LETTER X
    u'y' # 0x79 -> LATIN SMALL LETTER Y
    u'z' # 0x7A -> LATIN SMALL LETTER Z
    u'{' # 0x7B -> LEFT CURLY BRACKET
    u'|' # 0x7C -> VERTICAL LINE
    u'}' # 0x7D -> RIGHT CURLY BRACKET
    u'~' # 0x7E -> TILDE
    u'\x7f' # 0x7F -> DELETE
    u'\u20ac' # 0x80 -> EURO SIGN
    u'\ufffe' # 0x81 -> UNDEFINED
    u'\ufffe' # 0x82 -> UNDEFINED
    u'\ufffe' # 0x83 -> UNDEFINED
    u'\ufffe' # 0x84 -> UNDEFINED
    u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
    u'\ufffe' # 0x86 -> UNDEFINED
    u'\ufffe' # 0x87 -> UNDEFINED
    u'\ufffe' # 0x88 -> UNDEFINED
    u'\ufffe' # 0x89 -> UNDEFINED
    u'\ufffe' # 0x8A -> UNDEFINED
    u'\ufffe' # 0x8B -> UNDEFINED
    u'\ufffe' # 0x8C -> UNDEFINED
    u'\ufffe' # 0x8D -> UNDEFINED
    u'\ufffe' # 0x8E -> UNDEFINED
    u'\ufffe' # 0x8F -> UNDEFINED
    u'\ufffe' # 0x90 -> UNDEFINED
    u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
    u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
    u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
    u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
    u'\u2022' # 0x95 -> BULLET
    u'\u2013' # 0x96 -> EN DASH
    u'\u2014' # 0x97 -> EM DASH
    u'\ufffe' # 0x98 -> UNDEFINED
    u'\ufffe' # 0x99 -> UNDEFINED
    u'\ufffe' # 0x9A -> UNDEFINED
    u'\ufffe' # 0x9B -> UNDEFINED
    u'\ufffe' # 0x9C -> UNDEFINED
    u'\ufffe' # 0x9D -> UNDEFINED
    u'\ufffe' # 0x9E -> UNDEFINED
    u'\ufffe' # 0x9F -> UNDEFINED
    u'\xa0' # 0xA0 -> NO-BREAK SPACE
    u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
    u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
    u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
    u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
    u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
    u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
    u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
    u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
    u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
    u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
    u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
    u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
    u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
    u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
    u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
    u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
    u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
    u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
    u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
    u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
    u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
    u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
    u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
    u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
    u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
    u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
    u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
    u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
    u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
    u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
    u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
    u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
    u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
    u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
    u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
    u'\u0e24' # 0xC4 -> THAI CHARACTER RU
    u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
    u'\u0e26' # 0xC6 -> THAI CHARACTER LU
    u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
    u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
    u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
    u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
    u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
    u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
    u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
    u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
    u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
    u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
    u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
    u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
    u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
    u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
    u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
    u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
    u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
    u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
    u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
    u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
    u'\ufffe' # 0xDB -> UNDEFINED
    u'\ufffe' # 0xDC -> UNDEFINED
    u'\ufffe' # 0xDD -> UNDEFINED
    u'\ufffe' # 0xDE -> UNDEFINED
    u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
    u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
    u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
    u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
    u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
    u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
    u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
    u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
    u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
    u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
    u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
    u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
    u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
    u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
    u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
    u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
    u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
    u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
    u'\u0e51' # 0xF1 -> THAI DIGIT ONE
    u'\u0e52' # 0xF2 -> THAI DIGIT TWO
    u'\u0e53' # 0xF3 -> THAI DIGIT THREE
    u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
    u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
    u'\u0e56' # 0xF6 -> THAI DIGIT SIX
    u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
    u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
    u'\u0e59' # 0xF9 -> THAI DIGIT NINE
    u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
    u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
    u'\ufffe' # 0xFC -> UNDEFINED
    u'\ufffe' # 0xFD -> UNDEFINED
    u'\ufffe' # 0xFE -> UNDEFINED
    u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
# Inverse mapping (unicode -> byte), derived mechanically from the
# decoding table above.
encoding_table=codecs.charmap_build(decoding_table)
| bsd-2-clause |
julien78910/CouchPotatoServer | libs/guessit/transfo/guess_language.py | 94 | 1946 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import Guess
from guessit.transfo import SingleNodeGuesser
from guessit.language import search_language
import logging
log = logging.getLogger(__name__)
def guess_language(string, node, skip=None):
    """Search *string* for a language name.

    *skip* entries (dicts with 'node_idx' and 'span') that belong to
    *node* are translated into spans relative to the node before the
    search.  Returns a ``(Guess, span)`` pair, or ``(None, None)`` when
    no language is found.
    """
    if skip:
        # Keep only skip entries whose node_idx is a prefix of this
        # node's index, rebased to node-relative (1-based) coordinates.
        skip = [
            (entry['span'][0] - node.offset + 1,
             entry['span'][1] - node.offset + 1)
            for entry in skip
            if entry['node_idx'] == node.node_idx[:len(entry['node_idx'])]
        ]
    language, span, confidence = search_language(string, skip=skip)
    if not language:
        return None, None
    guess = Guess({'language': language},
                  confidence=confidence,
                  raw=string[span[0]:span[1]])
    return guess, span
guess_language.use_node = True
def process(mtree, *args, **kwargs):
    # Delegate to SingleNodeGuesser, which applies guess_language across
    # the match tree (guess_language.use_node makes the node available).
    SingleNodeGuesser(guess_language, None, log, *args, **kwargs).process(mtree)
# Note: 'language' is promoted to 'subtitleLanguage' in the post_process transfo
| gpl-3.0 |
samedder/azure-cli | src/command_modules/azure-cli-acr/azure_bdist_wheel.py | 199 | 21114 | """
"wheel" copyright (c) 2012-2017 Daniel Holth <dholth@fastmail.fm> and
contributors.
The MIT License
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Create a Azure wheel (.whl) distribution (a wheel is a built archive format).
This file is a copy of the official bdist_wheel file from wheel 0.30.0a0, enhanced
of the bottom with some Microsoft extension for Azure SDK for Python
"""
import csv
import hashlib
import os
import subprocess
import warnings
import shutil
import json
import sys
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import pkg_resources
safe_name = pkg_resources.safe_name
safe_version = pkg_resources.safe_version
from shutil import rmtree
from email.generator import Generator
from distutils.core import Command
from distutils.sysconfig import get_python_version
from distutils import log as logger
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag, get_platform
from wheel.util import native, open_for_csv
from wheel.archive import archive_wheelfile
from wheel.pkginfo import read_pkg_info, write_pkg_info
from wheel.metadata import pkginfo_to_dict
from wheel import pep425tags, metadata
from wheel import __version__ as wheel_version
def safer_name(name):
    """Return *name* normalized by pkg_resources.safe_name, with dashes
    folded to underscores (wheel filename convention)."""
    sanitized = safe_name(name)
    return sanitized.replace('-', '_')
def safer_version(version):
    """Return *version* normalized by pkg_resources.safe_version, with
    dashes folded to underscores (wheel filename convention)."""
    sanitized = safe_version(version)
    return sanitized.replace('-', '_')
class bdist_wheel(Command):
    description = 'create a wheel distribution'
    # (long name, short name, help text) triples in the format distutils
    # expects for command-line option parsing.
    user_options = [('bdist-dir=', 'b',
                     "temporary directory for creating the distribution"),
                    ('plat-name=', 'p',
                     "platform name to embed in generated filenames "
                     "(default: %s)" % get_platform()),
                    ('keep-temp', 'k',
                     "keep the pseudo-installation tree around after " +
                     "creating the distribution archive"),
                    ('dist-dir=', 'd',
                     "directory to put final built distributions in"),
                    ('skip-build', None,
                     "skip rebuilding everything (for testing/debugging)"),
                    ('relative', None,
                     "build the archive using relative paths"
                     "(default: false)"),
                    ('owner=', 'u',
                     "Owner name used when creating a tar file"
                     " [default: current user]"),
                    ('group=', 'g',
                     "Group name used when creating a tar file"
                     " [default: current group]"),
                    ('universal', None,
                     "make a universal wheel"
                     " (default: false)"),
                    ('python-tag=', None,
                     "Python implementation compatibility tag"
                     " (default: py%s)" % get_impl_ver()[0]),
                    ]
    # Options that are flags rather than taking a value.
    boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal']
    def initialize_options(self):
        """distutils hook: set every supported option to its default."""
        self.bdist_dir = None
        self.data_dir = None
        self.plat_name = None
        self.plat_tag = None
        self.format = 'zip'
        self.keep_temp = False
        self.dist_dir = None
        self.distinfo_dir = None
        self.egginfo_dir = None
        self.root_is_pure = None
        self.skip_build = None
        self.relative = False
        self.owner = None
        self.group = None
        self.universal = False
        self.python_tag = 'py' + get_impl_ver()[0]
        self.plat_name_supplied = False
    def finalize_options(self):
        """distutils hook: derive unset options from other commands."""
        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'wheel')
        self.data_dir = self.wheel_dist_name + '.data'
        # Remember whether --plat-name was given explicitly; get_tag()
        # only honours it for pure wheels in that case.
        self.plat_name_supplied = self.plat_name is not None
        need_options = ('dist_dir', 'plat_name', 'skip_build')
        self.set_undefined_options('bdist',
                                   *zip(need_options, need_options))
        # A wheel is "pure" when it contains no compiled extensions.
        self.root_is_pure = not (self.distribution.has_ext_modules()
                                 or self.distribution.has_c_libraries())
        # Support legacy [wheel] section for setting universal
        wheel = self.distribution.get_option_dict('wheel')
        if 'universal' in wheel:
            # please don't define this in your global configs
            val = wheel['universal'][1].strip()
            if val.lower() in ('1', 'true', 'yes'):
                self.universal = True
    @property
    def wheel_dist_name(self):
        """Return distribution full name with - replaced with _"""
        # e.g. "my_dist-1_0" for name "my-dist", version "1-0".
        return '-'.join((safer_name(self.distribution.get_name()),
                         safer_version(self.distribution.get_version())))
    def get_tag(self):
        """Return the (python, abi, platform) compatibility tag triple."""
        # bdist sets self.plat_name if unset, we should only use it for purepy
        # wheels if the user supplied it.
        if self.plat_name_supplied:
            plat_name = self.plat_name
        elif self.root_is_pure:
            plat_name = 'any'
        else:
            plat_name = self.plat_name or get_platform()
            # 32-bit interpreter on a 64-bit linux kernel reports
            # linux-x86_64; correct the wheel tag to linux_i686.
            if plat_name in ('linux-x86_64', 'linux_x86_64') and sys.maxsize == 2147483647:
                plat_name = 'linux_i686'
        # Tags may not contain '-' or '.' (PEP 425 normalization).
        plat_name = plat_name.replace('-', '_').replace('.', '_')
        if self.root_is_pure:
            if self.universal:
                impl = 'py2.py3'
            else:
                impl = self.python_tag
            tag = (impl, 'none', plat_name)
        else:
            impl_name = get_abbr_impl()
            impl_ver = get_impl_ver()
            # PEP 3149
            abi_tag = str(get_abi_tag()).lower()
            tag = (impl_name + impl_ver, abi_tag, plat_name)
            supported_tags = pep425tags.get_supported(
                supplied_platform=plat_name if self.plat_name_supplied else None)
            # XXX switch to this alternate implementation for non-pure:
            assert tag == supported_tags[0], "%s != %s" % (tag, supported_tags[0])
        return tag
    def get_archive_basename(self):
        """Return archive name without extension"""
        # Wheel filename convention: name-version-impl-abi-platform.
        impl_tag, abi_tag, plat_tag = self.get_tag()
        archive_basename = "%s-%s-%s-%s" % (
            self.wheel_dist_name,
            impl_tag,
            abi_tag,
            plat_tag)
        return archive_basename
    def run(self):
        """Build the wheel: install into a temp tree, write the dist-info
        metadata, archive the result, and optionally sign it."""
        build_scripts = self.reinitialize_command('build_scripts')
        build_scripts.executable = 'python'
        if not self.skip_build:
            self.run_command('build')
        install = self.reinitialize_command('install',
                                            reinit_subcommands=True)
        install.root = self.bdist_dir
        install.compile = False
        install.skip_build = self.skip_build
        install.warn_dir = False
        # A wheel without setuptools scripts is more cross-platform.
        # Use the (undocumented) `no_ep` option to setuptools'
        # install_scripts command to avoid creating entry point scripts.
        install_scripts = self.reinitialize_command('install_scripts')
        install_scripts.no_ep = True
        # Use a custom scheme for the archive, because we have to decide
        # at installation time which scheme to use.
        for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'):
            setattr(install,
                    'install_' + key,
                    os.path.join(self.data_dir, key))
        basedir_observed = ''
        if os.name == 'nt':
            # win32 barfs if any of these are ''; could be '.'?
            # (distutils.command.install:change_roots bug)
            basedir_observed = os.path.normpath(os.path.join(self.data_dir, '..'))
            self.install_libbase = self.install_lib = basedir_observed
        setattr(install,
                'install_purelib' if self.root_is_pure else 'install_platlib',
                basedir_observed)
        logger.info("installing to %s", self.bdist_dir)
        self.run_command('install')
        archive_basename = self.get_archive_basename()
        pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
        if not self.relative:
            archive_root = self.bdist_dir
        else:
            archive_root = os.path.join(
                self.bdist_dir,
                self._ensure_relative(install.install_base))
        self.set_undefined_options(
            'install_egg_info', ('target', 'egginfo_dir'))
        self.distinfo_dir = os.path.join(self.bdist_dir,
                                         '%s.dist-info' % self.wheel_dist_name)
        self.egg2dist(self.egginfo_dir,
                      self.distinfo_dir)
        self.write_wheelfile(self.distinfo_dir)
        self.write_record(self.bdist_dir, self.distinfo_dir)
        # Make the archive
        if not os.path.exists(self.dist_dir):
            os.makedirs(self.dist_dir)
        wheel_name = archive_wheelfile(pseudoinstall_root, archive_root)
        # Sign the archive
        if 'WHEEL_TOOL' in os.environ:
            subprocess.call([os.environ['WHEEL_TOOL'], 'sign', wheel_name])
        # Add to 'Distribution.dist_files' so that the "upload" command works
        getattr(self.distribution, 'dist_files', []).append(
            ('bdist_wheel', get_python_version(), wheel_name))
        if not self.keep_temp:
            if self.dry_run:
                logger.info('removing %s', self.bdist_dir)
            else:
                rmtree(self.bdist_dir)
    def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel_version + ')'):
        """Write the WHEEL metadata file into directory *wheelfile_base*."""
        from email.message import Message
        msg = Message()
        msg['Wheel-Version'] = '1.0'  # of the spec
        msg['Generator'] = generator
        msg['Root-Is-Purelib'] = str(self.root_is_pure).lower()
        # Doesn't work for bdist_wininst
        # One Tag header per expanded (impl, abi, plat) combination.
        impl_tag, abi_tag, plat_tag = self.get_tag()
        for impl in impl_tag.split('.'):
            for abi in abi_tag.split('.'):
                for plat in plat_tag.split('.'):
                    msg['Tag'] = '-'.join((impl, abi, plat))
        wheelfile_path = os.path.join(wheelfile_base, 'WHEEL')
        logger.info('creating %s', wheelfile_path)
        with open(wheelfile_path, 'w') as f:
            Generator(f, maxheaderlen=0).flatten(msg)
    def _ensure_relative(self, path):
        """Return *path* with any leading drive/separator stripped."""
        # copied from dir_util, deleted
        drive, path = os.path.splitdrive(path)
        if path[0:1] == os.sep:
            path = drive + path[1:]
        return path
    def _pkginfo_to_metadata(self, egg_info_path, pkginfo_path):
        # Thin indirection over wheel.metadata.pkginfo_to_metadata.
        return metadata.pkginfo_to_metadata(egg_info_path, pkginfo_path)
def license_file(self):
"""Return license filename from a license-file key in setup.cfg, or None."""
metadata = self.distribution.get_option_dict('metadata')
if not 'license_file' in metadata:
return None
return metadata['license_file'][1]
def setupcfg_requirements(self):
    """Generate requirements from setup.cfg as
    ('Requires-Dist', 'requirement; qualifier') tuples. From a metadata
    section in setup.cfg:

        [metadata]
        provides-extra = extra1
            extra2
        requires-dist = requirement; qualifier
            another; qualifier2
            unqualified

    Yields

        ('Provides-Extra', 'extra1'),
        ('Provides-Extra', 'extra2'),
        ('Requires-Dist', 'requirement; qualifier'),
        ('Requires-Dist', 'another; qualifier2'),
        ('Requires-Dist', 'unqualified')
    """
    metadata = self.distribution.get_option_dict('metadata')

    # our .ini parser folds - to _ in key names:
    for key, title in (('provides_extra', 'Provides-Extra'),
                       ('requires_dist', 'Requires-Dist')):
        # PEP 8 idiom: "key not in d" rather than "not key in d".
        if key not in metadata:
            continue
        # option-dict values are (source, value) tuples; the text is index 1
        field = metadata[key]
        for line in field[1].splitlines():
            line = line.strip()
            if not line:
                continue
            yield (title, line)
def add_requirements(self, metadata_path):
    """Merge requirements declared in setup.cfg into the file at *metadata_path*."""
    extra_reqs = list(self.setupcfg_requirements())
    if not extra_reqs:
        return
    pkg_info = read_pkg_info(metadata_path)
    if 'Provides-Extra' in pkg_info or 'Requires-Dist' in pkg_info:
        # setup.cfg wins: drop whatever setup.py already contributed.
        warnings.warn('setup.cfg requirements overwrite values from setup.py')
        del pkg_info['Provides-Extra']
        del pkg_info['Requires-Dist']
    for header, value in extra_reqs:
        pkg_info[header] = value
    write_pkg_info(metadata_path, pkg_info)
def egg2dist(self, egginfo_path, distinfo_path):
    """Convert an .egg-info directory into a .dist-info directory.

    Raises ValueError when no egg metadata is found at *egginfo_path*;
    on success writes METADATA, metadata.json and any DESCRIPTION /
    LICENSE documents into *distinfo_path*, then removes the egg-info
    source.
    """
    def adios(p):
        """Appropriately delete directory, file or link."""
        if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
            shutil.rmtree(p)
        elif os.path.exists(p):
            os.unlink(p)

    # Start from a clean slate so stale metadata never survives a rebuild.
    adios(distinfo_path)

    if not os.path.exists(egginfo_path):
        # There is no egg-info. This is probably because the egg-info
        # file/directory is not named matching the distribution name used
        # to name the archive file. Check for this case and report
        # accordingly.
        import glob
        pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
        possible = glob.glob(pat)
        err = "Egg metadata expected at %s but not found" % (egginfo_path,)
        if possible:
            alt = os.path.basename(possible[0])
            err += " (%s found - possible misnamed archive file?)" % (alt,)
        raise ValueError(err)

    if os.path.isfile(egginfo_path):
        # .egg-info is a single file
        pkginfo_path = egginfo_path
        pkg_info = self._pkginfo_to_metadata(egginfo_path, egginfo_path)
        os.mkdir(distinfo_path)
    else:
        # .egg-info is a directory
        pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
        pkg_info = self._pkginfo_to_metadata(egginfo_path, pkginfo_path)

        # ignore common egg metadata that is useless to wheel
        shutil.copytree(egginfo_path, distinfo_path,
                        ignore=lambda x, y: set(('PKG-INFO',
                                                 'requires.txt',
                                                 'SOURCES.txt',
                                                 'not-zip-safe',)))

        # delete dependency_links if it is only whitespace
        dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt')
        # BUG FIX: guard the read -- setuptools does not always emit
        # dependency_links.txt, and opening a missing file crashed here.
        if os.path.isfile(dependency_links_path):
            with open(dependency_links_path, 'r') as dependency_links_file:
                dependency_links = dependency_links_file.read().strip()
            if not dependency_links:
                adios(dependency_links_path)

    write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)

    # XXX deprecated. Still useful for current distribute/setuptools.
    metadata_path = os.path.join(distinfo_path, 'METADATA')
    self.add_requirements(metadata_path)

    # XXX intentionally a different path than the PEP.
    metadata_json_path = os.path.join(distinfo_path, 'metadata.json')
    pymeta = pkginfo_to_dict(metadata_path,
                             distribution=self.distribution)

    if 'description' in pymeta:
        description_filename = 'DESCRIPTION.rst'
        description_text = pymeta.pop('description')
        description_path = os.path.join(distinfo_path,
                                        description_filename)
        with open(description_path, "wb") as description_file:
            description_file.write(description_text.encode('utf-8'))
        pymeta['extensions']['python.details']['document_names']['description'] = description_filename

    # XXX heuristically copy any LICENSE/LICENSE.txt?
    license = self.license_file()
    if license:
        license_filename = 'LICENSE.txt'
        # BUG FIX: copy into the *distinfo_path* argument rather than
        # self.distinfo_dir, so the method honours its parameter.
        shutil.copy(license, os.path.join(distinfo_path, license_filename))
        pymeta['extensions']['python.details']['document_names']['license'] = license_filename

    with open(metadata_json_path, "w") as metadata_json:
        json.dump(pymeta, metadata_json, sort_keys=True)

    adios(egginfo_path)
def write_record(self, bdist_dir, distinfo_dir):
    """Write the RECORD manifest: one (path, sha256 digest, size) row per
    file under *bdist_dir*, with RECORD itself listed but left unhashed.
    """
    from wheel.util import urlsafe_b64encode

    record_path = os.path.join(distinfo_dir, 'RECORD')
    record_relpath = os.path.relpath(record_path, bdist_dir)

    def walk():
        # Sort directories and files so the manifest order is deterministic.
        for dir, dirs, files in os.walk(bdist_dir):
            dirs.sort()
            for f in sorted(files):
                yield os.path.join(dir, f)

    def skip(path):
        """Wheel hashes every possible file."""
        return (path == record_relpath)

    with open_for_csv(record_path, 'w+') as record_file:
        writer = csv.writer(record_file)
        for path in walk():
            relpath = os.path.relpath(path, bdist_dir)
            if skip(relpath):
                # BUG FIX: renamed loop-local names -- the original rebound
                # ``record_path`` (shadowing the RECORD file path above) and
                # shadowed the builtin ``hash``.
                digest_str = ''
                size = ''
            else:
                with open(path, 'rb') as f:
                    data = f.read()
                digest = hashlib.sha256(data).digest()
                digest_str = 'sha256=' + native(urlsafe_b64encode(digest))
                size = len(data)
            # RECORD always uses forward slashes, regardless of platform.
            writer.writerow((relpath.replace(os.path.sep, '/'), digest_str, size))
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
from distutils import log as logger
import os.path
#from wheel.bdist_wheel import bdist_wheel
class azure_bdist_wheel(bdist_wheel):
    """bdist_wheel variant for Azure SDK packages: injects a dependency on
    the configured namespace package and strips its __init__.py files from
    the wheel so the nspkg mechanism can own them.
    """

    description = "Create an Azure wheel distribution"
    user_options = bdist_wheel.user_options + \
        [('azure-namespace-package=', None,
          "Name of the deepest nspkg used")]

    def initialize_options(self):
        bdist_wheel.initialize_options(self)
        self.azure_namespace_package = None

    def finalize_options(self):
        bdist_wheel.finalize_options(self)
        if self.azure_namespace_package and not self.azure_namespace_package.endswith("-nspkg"):
            raise ValueError("azure_namespace_package must finish by -nspkg")

    def run(self):
        # BUG FIX: only inject the nspkg requirement when a namespace package
        # was actually configured; previously ``None.replace`` raised
        # AttributeError whenever --azure-namespace-package was omitted.
        if self.azure_namespace_package:
            if not self.distribution.install_requires:
                self.distribution.install_requires = []
            self.distribution.install_requires.append(
                "{}>=2.0.0".format(self.azure_namespace_package.replace('_', '-')))
        bdist_wheel.run(self)

    def write_record(self, bdist_dir, distinfo_dir):
        if self.azure_namespace_package:
            # Split and remove last part, assuming it's "nspkg"
            subparts = self.azure_namespace_package.split('-')[0:-1]
            # Every ancestor folder of the namespace package carries an
            # __init__.py that must not ship inside the wheel.
            folder_with_init = [os.path.join(*subparts[0:i + 1]) for i in range(len(subparts))]
            for azure_sub_package in folder_with_init:
                init_file = os.path.join(bdist_dir, azure_sub_package, '__init__.py')
                if os.path.isfile(init_file):
                    logger.info("manually remove {} while building the wheel".format(init_file))
                    os.remove(init_file)
                else:
                    raise ValueError("Unable to find {}. Are you sure of your namespace package?".format(init_file))
        bdist_wheel.write_record(self, bdist_dir, distinfo_dir)
# Register the customized builder so ``setup(cmdclass=cmdclass)`` picks it up.
cmdclass = {
    'bdist_wheel': azure_bdist_wheel,
}
| mit |
nitin-cherian/LifeLongLearning | Web_Development_Python/RealPython/flask-hello-world/env/lib/python3.5/site-packages/click/_winconsole.py | 197 | 7790 | # -*- coding: utf-8 -*-
# This module is based on the excellent work by Adam Bartoš who
# provided a lot of what went into the implementation here in
# the discussion to issue1602 in the Python bug tracker.
#
# There are some general differences in regards to how this works
# compared to the original patches as we do not need to patch
# the entire interpreter but just work in our little world of
# echo and prompt.
import io
import os
import sys
import zlib
import time
import ctypes
import msvcrt
from click._compat import _NonClosingTextIOWrapper, text_type, PY2
from ctypes import byref, POINTER, c_int, c_char, c_char_p, \
c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE
try:
from ctypes import pythonapi
PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
PyBuffer_Release = pythonapi.PyBuffer_Release
except ImportError:
pythonapi = None
from ctypes.wintypes import LPWSTR, LPCWSTR
c_ssize_p = POINTER(c_ssize_t)
kernel32 = windll.kernel32
GetStdHandle = kernel32.GetStdHandle
ReadConsoleW = kernel32.ReadConsoleW
WriteConsoleW = kernel32.WriteConsoleW
GetLastError = kernel32.GetLastError
GetCommandLineW = WINFUNCTYPE(LPWSTR)(
('GetCommandLineW', windll.kernel32))
CommandLineToArgvW = WINFUNCTYPE(
POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
('CommandLineToArgvW', windll.shell32))
STDIN_HANDLE = GetStdHandle(-10)
STDOUT_HANDLE = GetStdHandle(-11)
STDERR_HANDLE = GetStdHandle(-12)
PyBUF_SIMPLE = 0
PyBUF_WRITABLE = 1
ERROR_SUCCESS = 0
ERROR_NOT_ENOUGH_MEMORY = 8
ERROR_OPERATION_ABORTED = 995
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
EOF = b'\x1a'
MAX_BYTES_WRITTEN = 32767
class Py_buffer(ctypes.Structure):
    # ctypes mirror of CPython's C-level ``Py_buffer`` struct so that
    # PyObject_GetBuffer / PyBuffer_Release can be driven from Python code.
    # Field order and types must match the interpreter's ABI exactly.
    _fields_ = [
        ('buf', c_void_p),        # pointer to the start of the memory
        ('obj', py_object),       # object that owns the memory
        ('len', c_ssize_t),       # total length in bytes
        ('itemsize', c_ssize_t),
        ('readonly', c_int),
        ('ndim', c_int),
        ('format', c_char_p),
        ('shape', c_ssize_p),
        ('strides', c_ssize_p),
        ('suboffsets', c_ssize_p),
        ('internal', c_void_p)
    ]

    # Python 2's Py_buffer carries an extra inline smalltable member before
    # ``internal``; insert it so the layout matches that interpreter.
    if PY2:
        _fields_.insert(-1, ('smalltable', c_ssize_t * 2))
# On PyPy we cannot get buffers so our ability to operate here is
# severely limited.
# Without ctypes.pythonapi (e.g. PyPy) the C buffer API is unreachable.
if pythonapi is None:
    get_buffer = None
else:
    def get_buffer(obj, writable=False):
        """Expose *obj*'s raw memory as a ctypes ``c_char`` array."""
        buf = Py_buffer()
        flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
        PyObject_GetBuffer(py_object(obj), byref(buf), flags)
        try:
            buffer_type = c_char * buf.len
            # NOTE(review): the buffer view is released in ``finally`` while
            # the returned array still points at the memory; callers appear to
            # rely on the source object staying alive -- confirm before
            # changing lifetimes here.
            return buffer_type.from_address(buf.buf)
        finally:
            PyBuffer_Release(byref(buf))
class _WindowsConsoleRawIOBase(io.RawIOBase):
def __init__(self, handle):
self.handle = handle
def isatty(self):
io.RawIOBase.isatty(self)
return True
class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
    """Raw reader pulling UTF-16-LE data from a console via ReadConsoleW."""

    def readable(self):
        return True

    def readinto(self, b):
        bytes_to_be_read = len(b)
        if not bytes_to_be_read:
            return 0
        elif bytes_to_be_read % 2:
            # Console I/O happens in 2-byte UTF-16-LE code units, so an odd
            # byte count can never be satisfied.
            raise ValueError('cannot read odd number of bytes from '
                             'UTF-16-LE encoded console')

        buffer = get_buffer(b, writable=True)
        code_units_to_be_read = bytes_to_be_read // 2
        code_units_read = c_ulong()

        rv = ReadConsoleW(self.handle, buffer, code_units_to_be_read,
                          byref(code_units_read), None)
        if GetLastError() == ERROR_OPERATION_ABORTED:
            # wait for KeyboardInterrupt
            time.sleep(0.1)
        if not rv:
            raise OSError('Windows error: %s' % GetLastError())

        # A leading Ctrl-Z (0x1a) marks end-of-input on the console.
        if buffer[0] == EOF:
            return 0
        return 2 * code_units_read.value
class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
    """Raw writer pushing UTF-16-LE data to a console via WriteConsoleW."""

    def writable(self):
        return True

    @staticmethod
    def _get_error_message(errno):
        # Give the two commonly-seen WriteConsoleW failures friendly names.
        if errno == ERROR_SUCCESS:
            return 'ERROR_SUCCESS'
        elif errno == ERROR_NOT_ENOUGH_MEMORY:
            return 'ERROR_NOT_ENOUGH_MEMORY'
        return 'Windows error %s' % errno

    def write(self, b):
        bytes_to_be_written = len(b)
        buf = get_buffer(b)
        # Cap each call at MAX_BYTES_WRITTEN bytes, then convert the byte
        # count into UTF-16 code units (2 bytes each).
        code_units_to_be_written = min(bytes_to_be_written,
                                       MAX_BYTES_WRITTEN) // 2
        code_units_written = c_ulong()

        WriteConsoleW(self.handle, buf, code_units_to_be_written,
                      byref(code_units_written), None)
        bytes_written = 2 * code_units_written.value

        # RawIOBase contract: partial writes are fine, zero progress on a
        # non-empty buffer is an error.
        if bytes_written == 0 and bytes_to_be_written > 0:
            raise OSError(self._get_error_message(GetLastError()))
        return bytes_written
class ConsoleStream(object):
    """Facade pairing a text stream with its underlying byte stream.

    Text goes through the UTF-16-LE console wrapper; bytes are routed to
    ``buffer`` (after flushing the text layer so output stays ordered).
    Every other attribute is delegated to the text stream.
    """

    def __init__(self, text_stream, byte_stream):
        self._text_stream = text_stream
        self.buffer = byte_stream

    @property
    def name(self):
        return self.buffer.name

    def write(self, x):
        if isinstance(x, text_type):
            return self._text_stream.write(x)
        # Binary payload: push pending text out first, best-effort.
        try:
            self.flush()
        except Exception:
            pass
        return self.buffer.write(x)

    def writelines(self, lines):
        for chunk in lines:
            self.write(chunk)

    def __getattr__(self, name):
        return getattr(self._text_stream, name)

    def isatty(self):
        return self.buffer.isatty()

    def __repr__(self):
        return '<ConsoleStream name=%r encoding=%r>' % (self.name, self.encoding)
def _get_text_stdin(buffer_stream):
    # Wrap the raw console reader in a buffered, non-closing UTF-16-LE text
    # layer; *buffer_stream* stays reachable via ``ConsoleStream.buffer``.
    text_stream = _NonClosingTextIOWrapper(
        io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
        'utf-16-le', 'strict', line_buffering=True)
    return ConsoleStream(text_stream, buffer_stream)
def _get_text_stdout(buffer_stream):
    # UTF-16-LE text layer over the raw console writer for stdout; the
    # original byte stream stays reachable via ``ConsoleStream.buffer``.
    text_stream = _NonClosingTextIOWrapper(
        _WindowsConsoleWriter(STDOUT_HANDLE),
        'utf-16-le', 'strict', line_buffering=True)
    return ConsoleStream(text_stream, buffer_stream)
def _get_text_stderr(buffer_stream):
    # Same as _get_text_stdout, but bound to the stderr console handle.
    text_stream = _NonClosingTextIOWrapper(
        _WindowsConsoleWriter(STDERR_HANDLE),
        'utf-16-le', 'strict', line_buffering=True)
    return ConsoleStream(text_stream, buffer_stream)
if PY2:
    def _hash_py_argv():
        # Cheap crc32 fingerprint of sys.argv, used to detect whether the
        # argv Python saw still matches the real command line.
        return zlib.crc32('\x00'.join(sys.argv[1:]))

    _initial_argv_hash = _hash_py_argv()

    def _get_windows_argv():
        # Re-parse the true command line via the Win32 API to recover the
        # unicode arguments that Python 2's byte-based sys.argv mangled.
        argc = c_int(0)
        argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
        argv = [argv_unicode[i] for i in range(0, argc.value)]

        if not hasattr(sys, 'frozen'):
            argv = argv[1:]
            # Skip interpreter options up to the script name; -c/-m terminate
            # option processing just like the interpreter does.
            while len(argv) > 0:
                arg = argv[0]
                if not arg.startswith('-') or arg == '-':
                    break
                argv = argv[1:]
                if arg.startswith(('-c', '-m')):
                    break

        return argv[1:]
# Map the standard file descriptors to their console-wrapper factories.
_stream_factories = {
    0: _get_text_stdin,
    1: _get_text_stdout,
    2: _get_text_stderr,
}
def _get_windows_console_stream(f, encoding, errors):
    """Return a UTF-16-LE console wrapper for *f*, or None when *f* is not
    an interactive console stream (or the requested encoding/error mode is
    incompatible with direct console I/O)."""
    if get_buffer is None:
        return None
    if encoding not in ('utf-16-le', None):
        return None
    if errors not in ('strict', None):
        return None
    if not (hasattr(f, 'isatty') and f.isatty()):
        return None

    func = _stream_factories.get(f.fileno())
    if func is None:
        return None

    if not PY2:
        f = getattr(f, 'buffer')
        if f is None:
            return None
    else:
        # If we are on Python 2 we need to set the stream that we
        # deal with to binary mode as otherwise the exercise if a
        # bit moot. The same problems apply as for
        # get_binary_stdin and friends from _compat.
        msvcrt.setmode(f.fileno(), os.O_BINARY)
    return func(f)
| mit |
abitofalchemy/ScientificImpactPrediction | procjson_tograph.py | 1 | 8162 | # -*- coding: utf-8 -*-
__author__ = 'Sal Aguinaga'
__license__ = "GPL"
__version__ = "0.1.0"
__email__ = "saguinag@nd.edu"
import shelve
import numpy as np
import pandas as pd
import networkx as nx
import math
import argparse
import os
import sa_net_metrics as snm
import matplotlib
import itertools
import pprint as pp
matplotlib.use('pdf')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# Links:
# [0] http://www.programcreek.com/python/example/5240/numpy.loadtxt
# [1] http://stackoverflow.com/questions/35782251/python-how-to-color-the-nodes-of-a-network-according-to-their-degree/35786355
def draw_citing_users_follower_count():
    # Join each citing user's follower count onto the clustered edge list,
    # persist the merged edge list, then render the graph to figures/outfig.
    df = pd.read_csv('Results/twtrs_follower_network.tsv', sep='\t', header=None)
    df.columns = ['src', 'followers']
    # 'followers' is a comma-separated string; its token count ~ follower count
    count_followers = lambda row: len(row[1].split(','))
    df['fCnt'] = df.apply(count_followers, axis=1)
    edglstdf = pd.read_csv('Results/clustered_relevant_users.tsv', sep='\t', header=None)
    # strip '[' / ']' wrappers left over from list-style serialization
    eldf = edglstdf.apply(lambda row: [x.lstrip('[').rstrip(']') for x in row])
    eldf.columns = ['src','trg']
    eldf[['src']] = eldf[['src']].apply(pd.to_numeric)
    df = pd.merge(eldf,df, on='src')
    df[['src','trg','fCnt']].to_csv('Results/procjson_edglst.tsv', sep='\t', header=False, index=False)
    g=nx.Graph()
    g.add_edges_from(df[['src','trg']].values)
    print nx.info(g)
    f, axs = plt.subplots(1, 1, figsize=(1.6*6., 1*6.))
    # nx.draw_networkx(g, pos=nx.spring_layout(g), ax=axs, with_labels=False, node_size=df[['fCnt']]/float(len(df)), alpha=.5)
    pos=nx.spring_layout(g)
    # nx.draw_networkx(g, pos=pos, ax=axs, with_labels=False, alpha=.5, node_size=30)
    nx.draw_networkx_edges(g, pos=pos, ax=axs, alpha=0.5, width=0.8)
    # citing users drawn in blue-grey, their targets in black
    nx.draw_networkx_nodes(g, pos=pos, ax=axs, nodelist=list(df['src'].values), node_color='#7A83AC', node_size=30, alpha=0.5)
    nx.draw_networkx_nodes(g, pos=pos, ax=axs, nodelist=list(df['trg'].values), node_color='k', node_size=20, alpha=0.8)
    axs.patch.set_facecolor('None')
    axs.set_xticks([]) #[None]# grid(True, which='both')
    axs.set_yticks([]) #[None]# grid(True, which='both')
    plt.savefig('figures/outfig', bbox_inches='tight', pad_inches=0)
    return
def convert_follower_network_2edgelist():
    # Flatten the follower-network TSV into (user, follower) edge tuples,
    # sampling a fraction of each user's followers, and mirror the rows to
    # Results/procjson.tsv. Returns the edge list.
    dbg = False
    df = pd.read_csv('Results/twtrs_follower_network.tsv', sep='\t', header=None)
    edges = []
    with open('Results/procjson.tsv', 'w') as fout:
        for row in df.iterrows():
            # get a count of the followers : a naive approach
            users_flist = np.array([long(x) for x in row[1][1].lstrip('[').rstrip(']').split(',') if x != ''])
            # NOTE(review): comment below says 10% but the factor is .05 (5%) -- confirm intent
            sampsize = int(math.ceil(len(users_flist) * .05))
            # pick 10% of their follower network at random
            if len(users_flist) > 1:
                idx = np.arange(len(users_flist))
                np.random.shuffle(idx)
                subsample = users_flist[idx[:sampsize]]
            else:
                subsample = users_flist
            # now get the network for submample
            for j, trg in enumerate(subsample):
                fout.write('{}\t{}\n'.format(row[1][0], trg)) # ong(trg.strip())))
                edges.append((row[1][0], trg))
            if dbg: print row[1][0], len(row[1][1].lstrip('[').rstrip(']').split(','))
    if dbg: print len(edges)
    return edges
def visualize_graph(graph):
    # Render the largest connected component plus the full graph, and write
    # the degree distribution to Results/degree.tsv. Plots go to 'outplot'.
    if graph is None: return
    G = graph
    # identify largest connected component
    Gcc = sorted(nx.connected_component_subgraphs(G), key=len, reverse=True)
    print [len(x) for x in Gcc]
    Gcc = Gcc[0]
    print nx.info(Gcc)
    print 'A'
    pos = nx.circular_layout(Gcc)
    print 'B'
    nx.draw_networkx(Gcc, pos, with_labels=False, width=0.125, node_size=20, alpha=0.5)
    # nx.draw(Gcc, pos=nx.spring_layout(G))
    # print saving to disk
    print 'Saving to disk ...'
    # NOTE(review): 'bb__inches' looks like a typo for 'bbox_inches' -- confirm
    plt.savefig('outplot', bb__inches='tight')
    df = pd.DataFrame.from_dict(G.degree().items())
    df.columns = ['v', 'k']
    # group nodes by degree to obtain the empirical degree distribution p(k)
    gb = df.groupby(['k']).count()
    gb['pk'] = gb / float(G.number_of_nodes())
    print gb.head(), '<= gb'
    # gb['deg'] = gb.index.values
    print gb.head()
    gb['pk'].to_csv('Results/degree.tsv', index=True, sep="\t", header=True)
    # draw graph
    # G=nx.random_geometric_graph(G.number_of_nodes(),0.125)
    # position is stored as node attribute data for random_geometric_graph
    # pos=nx.get_node_attributes(G,'pos')
    nx.draw_networkx(G, pos=nx.spring_layout(G), node_size=20, with_labels=False, alpha=0.75, weight=0.5)
    # print saving to disk
    print 'Saving to disk ...'
    plt.savefig('outplot', bb__inches='tight')
def main111():
    # Legacy/experimental driver kept for reference. NOTE(review): `infname`
    # is never defined in this scope, so both branches rely on a global or
    # would raise NameError -- confirm before reviving this code path.
    if 1:
        G = nx.read_edgelist(infname)
        print nx.info(G)
        # Graph adj matix
        A = nx.to_scipy_sparse_matrix(G)
        print type(A)
        from scipy import sparse, io
        io.mmwrite("Results/test.mtx", A)
        exit()
        # write to disk clustering coeffs for this graph
        snm.get_clust_coeff([G], 'orig', 'mmonth')
        # write to disk egienvalue
        snm.network_value_distribution([G], [], 'origMmonth')
    if 0:
        edgelist = np.loadtxt(infname, dtype=str, delimiter='\t')
        print edgelist[:4]
        idx = np.arange(len(edgelist))
        np.random.shuffle(idx)
        subsamp_edgelist = edgelist[idx[:100]]
        G = nx.Graph()
        G.add_edges_from([(long(x), long(y)) for x, y in subsamp_edgelist])
        # visualize this graph
        # visualize_graph(G)
    exit()
    G = nx.Graph()
    G.add_edges_from([(long(x), long(y)) for x, y in edgelist])
    print nx.info(G)
    print 'Done'
def draw_basic_network(G,src_list):
    # Spring-layout drawing: the citing sources in blue, all other nodes in
    # red; saved to figures/basicfig.
    slpos = nx.spring_layout(G) # see this for a great grid layout [1]
    nx.draw_networkx(G, pos=slpos, node_color='b', nodelist=src_list, with_labels=False,node_size=20, \
                     edge_color='#7146CC')
    nx.draw_networkx_nodes(G, pos=slpos, node_color='r', nodelist=[x for x in G.nodes() if x not in src_list], \
                           alpha=0.8, with_labels=False,node_size=20)
    plt.savefig('figures/basicfig', bbox_inches='tight', pad_inches=0)
def get_parser():
    """Build the command-line parser for the procjson graph driver."""
    parser = argparse.ArgumentParser(description='procjson clust | Ex: python procjson_clust.py '
                                                 'Results/tweets_cleaned.tsv')
    # Both flags default to off; the driver checks them explicitly.
    parser.add_argument("--do-fcount", default=False, action="store_true", help='draw citing-users & follower count')
    parser.add_argument("--do-metrics", default=False, action="store_true", help='compute metrics and write to disk')
    parser.add_argument('--version', action='version', version=__version__)
    return parser
def main():
parser = get_parser()
args = vars(parser.parse_args())
print args
''' draw a graph of citing-users and their follower count
output: figures/outfig.pdf
'''
if args['do_fcount'] == True:
print '-'*4, 'draw a graph of citing-users and their follower count'
draw_citing_users_follower_count()
exit()
infname = 'Results/procjson.tsv'
infname = "Results/clustered_relevant_users.tsv"
with open(infname) as f:
lines = f.readlines()
edges = []
sourc = []
for j,l in enumerate(lines):
l = l.rstrip('\r\n')
lparts = l.split('\t')
edgesLst= [np.int64(p.lstrip('[').rstrip(']')) for p in lparts]
edges.append(tuple(edgesLst))
sourc.append(edgesLst[0])
# Add the twitter users' follower network
# processes this file: twtrs_follower_network.tsv
plusEdgesLst = convert_follower_network_2edgelist()
fllwrsEdges =[]
for x,y in plusEdgesLst:
x = np.int64(x)
y = np.int64(x)
fllwrsEdges.append((x,y))
####
#### Builds the basic graph
####
g = nx.Graph()
g.add_edges_from(edges)
print nx.info(g)
print '-'*4,'draw basic network'
draw_basic_network(g,sourc)
g.add_edges_from(plusEdgesLst)
print nx.info(g)
if args ['do_metrics'] == True:
print '-'*4,'compute network metrics and write to disk'
## \ /
## \/ isualization
# deg distrib
snm.get_degree_dist([g],"citeplus", 'orig')
# write to disk clustering coeffs for this graph
snm.get_clust_coeff([g], 'orig', 'citeplus')
# write to disk egienvalue
snm.network_value_distribution([g], [], 'citeplus')
if 0:
L = nx.normalized_laplacian_matrix(g)
e = np.linalg.eigvals(L.A)
print("Largest eigenvalue:", max(e))
print("Smallest eigenvalue:", min(e))
# Script entry point.
if __name__ == '__main__':
    main()
| mit |
535521469/crawler_sth | scrapy/utils/reactor.py | 178 | 1360 | from twisted.internet import reactor, error
def listen_tcp(portrange, host, factory):
    """Like reactor.listenTCP but tries different ports in a range."""
    assert len(portrange) <= 2, "invalid portrange: %s" % portrange
    # A bare integer means a single fixed port.
    if not hasattr(portrange, '__iter__'):
        return reactor.listenTCP(portrange, factory, interface=host)
    # An empty range lets the OS pick any free port.
    if not portrange:
        return reactor.listenTCP(0, factory, interface=host)
    if len(portrange) == 1:
        return reactor.listenTCP(portrange[0], factory, interface=host)
    # Two-element range: walk it, re-raising only when the last port fails.
    for port in range(portrange[0], portrange[1] + 1):
        try:
            return reactor.listenTCP(port, factory, interface=host)
        except error.CannotListenError:
            if port == portrange[1]:
                raise
class CallLaterOnce(object):
    """Schedule a function to be called in the next reactor loop, but only
    if it hasn't already been scheduled since the last time it ran."""

    def __init__(self, func, *a, **kw):
        self._func = func
        self._a = a
        self._kw = kw
        self._call = None          # pending reactor DelayedCall, if any

    def schedule(self, delay=0):
        # Coalesce: only one outstanding call at a time.
        if self._call is None:
            self._call = reactor.callLater(delay, self)

    def cancel(self):
        if self._call:
            self._call.cancel()

    def __call__(self):
        # Clear the pending marker first so the callee may reschedule.
        self._call = None
        return self._func(*self._a, **self._kw)
| bsd-3-clause |
Lynx187/script.module.urlresolver | lib/urlresolver/plugins/megavids.py | 3 | 2368 | '''
Allmyvideos urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
class MegaVidsResolver(Plugin, UrlResolver, PluginSettings):
    """URL resolver for videos hosted on mega-vids.com."""
    implements = [UrlResolver, PluginSettings]
    name = "mega-vids"
    domains = [ "mega-vids.com" ]

    def __init__(self):
        p = self.get_setting('priority') or 100
        self.priority = int(p)
        self.net = Net()

    def get_media_url(self, host, media_id):
        """Fetch the page, replay its hidden form, and extract the stream URL."""
        url = self.get_url(host, media_id)
        html = self.net.http_GET(url).content

        # Collect the hidden form fields required by the confirmation POST.
        data = {}
        r = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', html)
        for name, value in r:
            data[name] = value

        html = self.net.http_POST(url, data).content

        r = re.search("file\s*:\s*'(.+?)'", html)
        if r:
            return r.group(1)
        else:
            raise UrlResolver.ResolverError('could not find video')

    def get_url(self, host, media_id):
        return 'http://mega-vids.com/%s' % media_id

    def get_host_and_id(self, url):
        """Return (host, media_id) parsed from *url*, or False on no match."""
        r = re.search('//(.+?)/(?:embed-)?([0-9a-zA-Z]+)',url)
        if r:
            return r.groups()
        else:
            return False
        # BUG FIX: removed the unreachable "return('host', 'media_id')" that
        # followed -- both branches above already return.

    def valid_url(self, url, host):
        if self.get_setting('enabled') == 'false': return False
        return (re.match('http://(www.)?mega-vids.com/[0-9A-Za-z]+', url) or re.match('http://(www.)?mega-vids.com/embed-[0-9A-Za-z]+[\-]*\d*[x]*\d*.*[html]*', url) or 'mega-vids' in host)
| gpl-2.0 |
apiaryio/heroku-buildpack-python-scons | vendor/distribute-0.6.36/setuptools/command/install_lib.py | 454 | 2486 | from distutils.command.install_lib import install_lib as _install_lib
import os
class install_lib(_install_lib):
    """Don't add compiled flags to filenames of non-Python files"""

    def _bytecode_filenames(self, py_filenames):
        # Only genuine .py sources get .pyc/.pyo companions; any data files
        # in the list are passed over untouched.
        bytecode_files = []
        for py_file in py_filenames:
            if not py_file.endswith('.py'):
                continue
            if self.compile:
                bytecode_files.append(py_file + "c")
            if self.optimize > 0:
                bytecode_files.append(py_file + "o")

        return bytecode_files

    def run(self):
        self.build()
        outfiles = self.install()
        if outfiles is not None:
            # always compile, in case we have any extension stubs to deal with
            self.byte_compile(outfiles)

    def get_exclusions(self):
        # Map of absolute path -> 1 for files that must NOT be installed.
        # Namespace-package __init__ files are excluded in
        # single-version-externally-managed mode because the .pth mechanism
        # takes over for them.
        exclude = {}
        nsp = self.distribution.namespace_packages
        if (nsp and self.get_finalized_command('install')
                .single_version_externally_managed
            ):
            for pkg in nsp:
                # Exclude the __init__ files of the package and every ancestor.
                parts = pkg.split('.')
                while parts:
                    pkgdir = os.path.join(self.install_dir, *parts)
                    for f in '__init__.py', '__init__.pyc', '__init__.pyo':
                        exclude[os.path.join(pkgdir,f)] = 1
                    parts.pop()
        return exclude

    def copy_tree(
        self, infile, outfile,
        preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
    ):
        # Only the default distutils copy semantics are supported here.
        assert preserve_mode and preserve_times and not preserve_symlinks
        exclude = self.get_exclusions()

        # Nothing to filter: fall straight through to distutils.
        if not exclude:
            return _install_lib.copy_tree(self, infile, outfile)

        # Exclude namespace package __init__.py* files from the output
        from setuptools.archive_util import unpack_directory
        from distutils import log

        outfiles = []

        def pf(src, dst):
            # unpack_directory callback: False skips the file, a path installs
            # it there and records it.
            if dst in exclude:
                log.warn("Skipping installation of %s (namespace package)",dst)
                return False

            log.info("copying %s -> %s", src, os.path.dirname(dst))
            outfiles.append(dst)
            return dst

        unpack_directory(infile, outfile, pf)

        return outfiles

    def get_outputs(self):
        # Report what was actually installed: distutils' list minus exclusions.
        outputs = _install_lib.get_outputs(self)
        exclude = self.get_exclusions()
        if exclude:
            return [f for f in outputs if f not in exclude]
        return outputs
| mit |
strands-project/strands_qsr_lib | qsr_lib/tests/rcc3_tester.py | 8 | 2273 | #!/usr/bin/env python
from __future__ import print_function, division
import sys
from abstractclass_unittest_dyadic import Abstractclass_Unittest_Dyadic
class RCC3_Test(Abstractclass_Unittest_Dyadic):
    """Unit tests for the RCC3 QSR: defaults, namespace precedence,
    quantisation factor, bounding-box handling and custom parameters.
    Expected outputs live in the data*_rcc3_*.txt fixture files."""

    def __init__(self, *args):
        super(RCC3_Test, self).__init__(*args)
        self._unique_id = "rcc3"  # QSR identifier under test
        self.__params = {"quantisation_factor": 2.0}
        self.__custom = {self._unique_id: {"qsrs_for": [("o1", "o2")],
                                           "quantisation_factor": 2.0}}

    def test_defaults(self):
        self.assertItemsEqual(*self.defaults("data1", "data1_rcc3_defaults.txt"))

    def test_qsrs_for_global_namespace(self):
        self.assertItemsEqual(*self.qsrs_for_global_namespace("data1", "data1_rcc3_qsrs_for_global_namespace.txt"))

    def test_qsrs_for_qsr_namespace(self):
        self.assertItemsEqual(*self.qsrs_for_qsr_namespace("data1", "data1_rcc3_qsrs_for_qsr_namespace.txt"))
        # precedes "for_all_qsrs" namespace
        self.assertItemsEqual(*self.qsrs_for_qsr_namespace_over_global_namespace("data1",
                                                                                "data1_rcc3_qsrs_for_qsr_namespace.txt"))

    def test_q_factor(self):
        self.assertItemsEqual(*self.q_factor("data1", "data1_rcc3_q_factor_2p0.txt",
                                             self.__params["quantisation_factor"]))
        # Quantised results must genuinely differ from the defaults.
        q_factor_results, defaults_results = self.q_factor_data_notequal_defaults("data1_rcc3_q_factor_2p0.txt",
                                                                                  "data1_rcc3_defaults.txt")
        self.assertFalse(q_factor_results == defaults_results)

    def test_with_bounding_boxes(self):
        self.assertItemsEqual(*self.defaults("data2", "data2_rcc3_defaults.txt"))

    def test_without_bounding_boxes(self):
        self.assertItemsEqual(*self.defaults("data3", "data3_rcc3_defaults.txt"))

    def test_floats(self):
        self.assertItemsEqual(*self.defaults("data4", "data4_rcc3_defaults.txt"))

    def test_custom(self):
        self.assertItemsEqual(*self.custom("data1", "data1_rcc3_custom.txt", self.__custom))
if __name__ == '__main__':
import rosunit
rosunit.unitrun("qsr_lib", "rcc3_test", RCC3_Test, sys.argv)
| mit |
blois/AndroidSDKCloneMin | ndk/prebuilt/linux-x86_64/lib/python2.7/binhex.py | 216 | 14476 | """Macintosh binhex compression/decompression.
easy interface:
binhex(inputfilename, outputfilename)
hexbin(inputfilename, outputfilename)
"""
#
# Jack Jansen, CWI, August 1995.
#
# The module is supposed to be as compatible as possible. Especially the
# easy interface should work "as expected" on any platform.
# XXXX Note: currently, textfiles appear in mac-form on all platforms.
# We seem to lack a simple character-translate in python.
# (we should probably use ISO-Latin-1 on all but the mac platform).
# XXXX The simple routines are too simple: they expect to hold the complete
# files in-core. Should be fixed.
# XXXX It would be nice to handle AppleDouble format on unix
# (for servers serving macs).
# XXXX I don't understand what happens when you get 0x90 times the same byte on
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
#
import sys
import os
import struct
import binascii
__all__ = ["binhex","hexbin","Error"]
class Error(Exception):
    """Raised for binhex-format errors (size mismatches, overlong names)."""
    pass
# States (what have we written) -- encoder/decoder progress markers.
[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)

# Various constants
REASONABLY_LARGE=32768  # Minimal amount we pass the rle-coder
LINELEN=64              # hqx ASCII output line length
RUNCHAR=chr(0x90)   # run-length introducer
#
# This code is no longer byte-order dependent
#
# Workarounds for non-mac machines.
try:
    from Carbon.File import FSSpec, FInfo
    from MacOS import openrf

    def getfileinfo(name):
        """Return (basename, FInfo, data-fork size, resource-fork size)."""
        finfo = FSSpec(name).FSpGetFInfo()
        dir, file = os.path.split(name)
        # XXX Get resource/data sizes
        fp = open(name, 'rb')
        fp.seek(0, 2)
        dlen = fp.tell()
        fp = openrf(name, '*rb')
        fp.seek(0, 2)
        rlen = fp.tell()
        return file, finfo, dlen, rlen

    def openrsrc(name, *mode):
        if not mode:
            mode = '*rb'
        else:
            mode = '*' + mode[0]
        return openrf(name, mode)

except ImportError:
    #
    # Glue code for non-macintosh usage
    #
    class FInfo:
        # Minimal stand-in for Carbon's FInfo record.
        def __init__(self):
            self.Type = '????'
            self.Creator = '????'
            self.Flags = 0

    def getfileinfo(name):
        """Return (basename, FInfo, data-fork size, 0) for a plain file."""
        finfo = FInfo()
        # Quick check for textfile
        fp = open(name)
        # BUG FIX: read from the handle already open above instead of opening
        # the file a second time and leaking that descriptor.
        data = fp.read(256)
        for c in data:
            if not c.isspace() and (c<' ' or ord(c) > 0x7f):
                break
        else:
            finfo.Type = 'TEXT'
        fp.seek(0, 2)
        dsize = fp.tell()
        fp.close()
        dir, file = os.path.split(name)
        # ':' was the Mac path separator; neutralize it in the stored name.
        file = file.replace(':', '-', 1)
        return file, finfo, dsize, 0

    class openrsrc:
        # Resource forks don't exist off the Mac: reads yield nothing and
        # writes are silently discarded.
        def __init__(self, *args):
            pass

        def read(self, *args):
            return ''

        def write(self, *args):
            pass

        def close(self):
            pass
class _Hqxcoderengine:
    """Write data to the coder in 3-byte chunks"""

    def __init__(self, ofp):
        self.ofp = ofp
        self.data = ''            # pending input, not yet a multiple of 3 bytes
        self.hqxdata = ''         # encoded output awaiting full-line flushes
        # First output line is one char short: the ':' preamble written by
        # BinHex.__init__ already occupies a column.
        self.linelen = LINELEN-1

    def write(self, data):
        self.data = self.data + data
        datalen = len(self.data)
        # b2a_hqx consumes whole 3-byte groups; hold back the remainder.
        todo = (datalen//3)*3
        data = self.data[:todo]
        self.data = self.data[todo:]
        if not data:
            return
        self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
        self._flush(0)

    def _flush(self, force):
        # Emit as many complete lines as possible; with force, emit the
        # trailing partial line plus the closing ':' terminator.
        first = 0
        while first <= len(self.hqxdata)-self.linelen:
            last = first + self.linelen
            self.ofp.write(self.hqxdata[first:last]+'\n')
            self.linelen = LINELEN
            first = last
        self.hqxdata = self.hqxdata[first:]
        if force:
            self.ofp.write(self.hqxdata + ':\n')

    def close(self):
        # Encode whatever partial group remains, then force the final flush.
        if self.data:
            self.hqxdata = \
                 self.hqxdata + binascii.b2a_hqx(self.data)
        self._flush(1)
        self.ofp.close()
        del self.ofp
class _Rlecoderengine:
    """Write data to the RLE-coder in suitably large chunks"""

    def __init__(self, ofp):
        self.ofp = ofp
        self.data = ''

    def write(self, data):
        # Accumulate until the buffer is big enough for an efficient pass.
        self.data += data
        if len(self.data) >= REASONABLY_LARGE:
            self.ofp.write(binascii.rlecode_hqx(self.data))
            self.data = ''

    def close(self):
        # Encode the remaining tail, then shut down the downstream coder.
        if self.data:
            self.ofp.write(binascii.rlecode_hqx(self.data))
        self.ofp.close()
        del self.ofp
class BinHex:
    """Binhex4 encoder.

    Writes the binhex header for the named file, then accepts the data
    fork via write()/close_data() and the resource fork via
    write_rsrc()/close().  Each section is followed by a CRC.

    Fixes relative to the original:
    - close_data() reported ``self.rlen`` in its size-mismatch error
      message; it now correctly reports the data-fork difference
      ``self.dlen``.
    - ``raise Error, msg`` (Python 2-only syntax) replaced with the
      equivalent ``raise Error(msg)``, which is valid on Python 2 and 3.
    """

    def __init__(self, name_finfo_dlen_rlen, ofp):
        name, finfo, dlen, rlen = name_finfo_dlen_rlen
        if type(ofp) == type(''):
            # A string was passed: treat it as an output filename.
            ofname = ofp
            ofp = open(ofname, 'w')
        ofp.write('(This file must be converted with BinHex 4.0)\n\n:')
        # Stack the coders: data -> RLE -> hqx -> output file.
        hqxer = _Hqxcoderengine(ofp)
        self.ofp = _Rlecoderengine(hqxer)
        self.crc = 0
        if finfo is None:
            finfo = FInfo()
        self.dlen = dlen  # data-fork bytes still expected
        self.rlen = rlen  # resource-fork bytes still expected
        self._writeinfo(name, finfo)
        self.state = _DID_HEADER

    def _writeinfo(self, name, finfo):
        # Emit the binhex header record: name, type/creator, flags,
        # and the two fork lengths, followed by its CRC.
        nl = len(name)
        if nl > 63:
            raise Error('Filename too long')
        d = chr(nl) + name + '\0'
        d2 = finfo.Type + finfo.Creator

        # Force all structs to be packed with big-endian
        d3 = struct.pack('>h', finfo.Flags)
        d4 = struct.pack('>ii', self.dlen, self.rlen)
        info = d + d2 + d3 + d4
        self._write(info)
        self._writecrc()

    def _write(self, data):
        # Every byte written participates in the running CRC.
        self.crc = binascii.crc_hqx(data, self.crc)
        self.ofp.write(data)

    def _writecrc(self):
        # XXXX Should this be here??
        # self.crc = binascii.crc_hqx('\0\0', self.crc)
        # Pick signed/unsigned pack format to match the CRC's sign.
        if self.crc < 0:
            fmt = '>h'
        else:
            fmt = '>H'
        self.ofp.write(struct.pack(fmt, self.crc))
        self.crc = 0

    def write(self, data):
        """Write a chunk of the data fork."""
        if self.state != _DID_HEADER:
            raise Error('Writing data at the wrong time')
        self.dlen = self.dlen - len(data)
        self._write(data)

    def close_data(self):
        """Finish the data fork; the declared length must be consumed exactly."""
        if self.dlen != 0:
            # Bug fix: report the data-fork mismatch (dlen), not rlen.
            raise Error('Incorrect data size, diff=%r' % (self.dlen,))
        self._writecrc()
        self.state = _DID_DATA

    def write_rsrc(self, data):
        """Write a chunk of the resource fork (closing the data fork first if needed)."""
        if self.state < _DID_DATA:
            self.close_data()
        if self.state != _DID_DATA:
            raise Error('Writing resource data at the wrong time')
        self.rlen = self.rlen - len(data)
        self._write(data)

    def close(self):
        """Finish the resource fork and close the underlying coders."""
        if self.state < _DID_DATA:
            self.close_data()
        if self.state != _DID_DATA:
            raise Error('Close at the wrong time')
        if self.rlen != 0:
            raise Error(
                "Incorrect resource-datasize, diff=%r" % (self.rlen,))
        self._writecrc()
        self.ofp.close()
        self.state = None
        del self.ofp
def binhex(inp, out):
    """(infilename, outfilename) - Create binhex-encoded copy of a file"""
    coder = BinHex(getfileinfo(inp), out)

    # XXXX Do textfile translation on non-mac systems
    infile = open(inp, 'rb')
    while 1:
        chunk = infile.read(128000)
        if not chunk:
            break
        coder.write(chunk)
    coder.close_data()
    infile.close()

    # Resource fork (a stub empty fork on non-Macintosh platforms).
    infile = openrsrc(inp, 'rb')
    while 1:
        chunk = infile.read(128000)
        if not chunk:
            break
        coder.write_rsrc(chunk)
    coder.close()
    infile.close()
class _Hqxdecoderengine:
    """Read data via the decoder in 4-byte chunks"""

    def __init__(self, ifp):
        # ifp: underlying input file containing hqx-encoded text.
        self.ifp = ifp
        self.eof = 0  # set once a2b_hqx sees the ':' stream terminator

    def read(self, totalwtd):
        """Read at least wtd bytes (or until EOF)"""
        decdata = ''
        wtd = totalwtd
        #
        # The loop here is convoluted, since we don't really know how
        # much to decode: there may be newlines in the incoming data.
        while wtd > 0:
            if self.eof: return decdata
            # 4 encoded characters decode to 3 raw bytes; over-request
            # to compensate for newlines mixed into the input.
            wtd = ((wtd + 2) // 3) * 4
            data = self.ifp.read(wtd)
            #
            # Next problem: there may not be a complete number of
            # bytes in what we pass to a2b. Solve by yet another
            # loop.
            #
            while 1:
                try:
                    decdatacur, self.eof = \
                        binascii.a2b_hqx(data)
                    break
                except binascii.Incomplete:
                    pass
                # Grow the buffer one byte at a time until a2b_hqx
                # accepts it (i.e. it holds a whole coding group).
                newdata = self.ifp.read(1)
                if not newdata:
                    raise Error, \
                        'Premature EOF on binhex file'
                data = data + newdata
            decdata = decdata + decdatacur
            wtd = totalwtd - len(decdata)
            if not decdata and not self.eof:
                raise Error, 'Premature EOF on binhex file'
        return decdata

    def close(self):
        self.ifp.close()
class _Rledecoderengine:
    """Read data via the RLE-coder"""

    def __init__(self, ifp):
        # ifp: upstream hqx decoder.
        self.ifp = ifp
        self.pre_buffer = ''   # hqx-decoded bytes not yet RLE-expanded
        self.post_buffer = ''  # fully decoded bytes ready for the caller
        self.eof = 0

    def read(self, wtd):
        # Serve from post_buffer, refilling as needed; may return fewer
        # than wtd bytes only at end of stream.
        if wtd > len(self.post_buffer):
            self._fill(wtd - len(self.post_buffer))
        rv = self.post_buffer[:wtd]
        self.post_buffer = self.post_buffer[wtd:]
        return rv

    def _fill(self, wtd):
        # Read a little extra so a trailing escape sequence can be held
        # back (see the comment block below).
        self.pre_buffer = self.pre_buffer + self.ifp.read(wtd + 4)
        if self.ifp.eof:
            # Stream exhausted: decode everything that remains.
            self.post_buffer = self.post_buffer + \
                binascii.rledecode_hqx(self.pre_buffer)
            self.pre_buffer = ''
            return

        #
        # Obfuscated code ahead. We have to take care that we don't
        # end up with an orphaned RUNCHAR later on. So, we keep a couple
        # of bytes in the buffer, depending on what the end of
        # the buffer looks like:
        # '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
        # '?\220' - Keep 2 bytes: repeated something-else
        # '\220\0' - Escaped \220: Keep 2 bytes.
        # '?\220?' - Complete repeat sequence: decode all
        # otherwise: keep 1 byte.
        #
        # NOTE(review): the branch order matters — each test below is
        # only reached when the previous patterns did not match.
        mark = len(self.pre_buffer)
        if self.pre_buffer[-3:] == RUNCHAR + '\0' + RUNCHAR:
            mark = mark - 3
        elif self.pre_buffer[-1] == RUNCHAR:
            mark = mark - 2
        elif self.pre_buffer[-2:] == RUNCHAR + '\0':
            mark = mark - 2
        elif self.pre_buffer[-2] == RUNCHAR:
            pass # Decode all
        else:
            mark = mark - 1

        self.post_buffer = self.post_buffer + \
            binascii.rledecode_hqx(self.pre_buffer[:mark])
        self.pre_buffer = self.pre_buffer[mark:]

    def close(self):
        self.ifp.close()
class HexBin:
    """Binhex4 decoder.

    Parses the binhex header, then exposes the data fork via
    read()/close_data() and the resource fork via read_rsrc()/close(),
    verifying the CRC of each section.
    """

    def __init__(self, ifp):
        # ifp may be an open file or a filename.
        if type(ifp) == type(''):
            ifp = open(ifp)
        #
        # Find initial colon.
        #
        while 1:
            ch = ifp.read(1)
            if not ch:
                raise Error, "No binhex data found"
            # Cater for \r\n terminated lines (which show up as \n\r, hence
            # all lines start with \r)
            if ch == '\r':
                continue
            if ch == ':':
                break
            if ch != '\n':
                # Skip the rest of a non-binhex preamble line.
                dummy = ifp.readline()

        # Stack the decoders: file -> hqx -> RLE -> caller.
        hqxifp = _Hqxdecoderengine(ifp)
        self.ifp = _Rledecoderengine(hqxifp)
        self.crc = 0
        self._readheader()

    def _read(self, len):
        # All reads flow through here so the running CRC stays current.
        data = self.ifp.read(len)
        self.crc = binascii.crc_hqx(data, self.crc)
        return data

    def _checkcrc(self):
        # Compare the 16-bit CRC stored in the stream with the running
        # CRC accumulated by _read(); reset it for the next section.
        filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
        #self.crc = binascii.crc_hqx('\0\0', self.crc)
        # XXXX Is this needed??
        self.crc = self.crc & 0xffff
        if filecrc != self.crc:
            raise Error, 'CRC error, computed %x, read %x' \
                %(self.crc, filecrc)
        self.crc = 0

    def _readheader(self):
        # Header layout: 1-byte name length, name, NUL, 4-byte type,
        # 4-byte creator, 2-byte flags, two 4-byte fork lengths, CRC.
        # NOTE: `len` and `type` shadow builtins here; kept as-is.
        len = self._read(1)
        fname = self._read(ord(len))
        rest = self._read(1+4+4+2+4+4)
        self._checkcrc()

        type = rest[1:5]
        creator = rest[5:9]
        flags = struct.unpack('>h', rest[9:11])[0]
        self.dlen = struct.unpack('>l', rest[11:15])[0]
        self.rlen = struct.unpack('>l', rest[15:19])[0]

        self.FName = fname
        self.FInfo = FInfo()
        self.FInfo.Creator = creator
        self.FInfo.Type = type
        self.FInfo.Flags = flags

        self.state = _DID_HEADER

    def read(self, *n):
        """Read up to n bytes of the data fork (all of it when n is omitted)."""
        if self.state != _DID_HEADER:
            raise Error, 'Read data at wrong time'
        if n:
            n = n[0]
            n = min(n, self.dlen)
        else:
            n = self.dlen

        rv = ''
        while len(rv) < n:
            rv = rv + self._read(n-len(rv))
        self.dlen = self.dlen - n
        return rv

    def close_data(self):
        """Finish the data fork, skipping any unread bytes, and verify its CRC."""
        if self.state != _DID_HEADER:
            raise Error, 'close_data at wrong time'
        if self.dlen:
            dummy = self._read(self.dlen)
        self._checkcrc()
        self.state = _DID_DATA

    def read_rsrc(self, *n):
        """Read up to n bytes of the resource fork (closing the data fork first)."""
        if self.state == _DID_HEADER:
            self.close_data()
        if self.state != _DID_DATA:
            raise Error, 'Read resource data at wrong time'
        if n:
            n = n[0]
            n = min(n, self.rlen)
        else:
            n = self.rlen
        self.rlen = self.rlen - n
        return self._read(n)

    def close(self):
        # Drain any unread resource data so the CRC check is valid.
        if self.rlen:
            dummy = self.read_rsrc(self.rlen)
        self._checkcrc()
        self.state = _DID_RSRC
        self.ifp.close()
def hexbin(inp, out):
    """(infilename, outfilename) - Decode binhexed file

    Writes the data fork to *out* (defaulting to the name stored in the
    binhex header) and, if present, the resource fork via openrsrc().

    Fix: the original bound ``finfo = ifp.FInfo`` and never used it;
    the dead local has been removed.
    """
    decoder = HexBin(inp)
    if not out:
        out = decoder.FName

    # XXXX Do translation on non-mac systems
    ofp = open(out, 'wb')
    while 1:
        d = decoder.read(128000)
        if not d: break
        ofp.write(d)
    ofp.close()
    decoder.close_data()

    # Only create a resource-fork file if there is resource data.
    d = decoder.read_rsrc(128000)
    if d:
        ofp = openrsrc(out, 'wb')
        ofp.write(d)
        while 1:
            d = decoder.read_rsrc(128000)
            if not d: break
            ofp.write(d)
        ofp.close()

    decoder.close()
def _test():
    """Round-trip smoke test: encode sys.argv[1], then decode the result."""
    source = sys.argv[1]
    binhex(source, source + '.hqx')
    hexbin(source + '.hqx', source + '.viahqx')
    # hexbin(source, source + '.unpacked')
    sys.exit(1)


if __name__ == '__main__':
    _test()
| apache-2.0 |
mojeto/django | django/contrib/gis/db/models/fields.py | 7 | 16340 | from collections import defaultdict
from django.contrib.gis import forms, gdal
from django.contrib.gis.db.models.lookups import (
RasterBandTransform, gis_lookups,
)
from django.contrib.gis.db.models.proxy import SpatialProxy
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.geometry.backend import Geometry, GeometryException
from django.core.exceptions import ImproperlyConfigured
from django.db.models.expressions import Expression
from django.db.models.fields import Field
from django.utils.translation import gettext_lazy as _
# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = defaultdict(dict)
def get_srid_info(srid, connection):
    """
    Return the units, unit name, and spheroid WKT associated with the
    given SRID from the `spatial_ref_sys` (or equivalent) spatial database
    table for the given database connection. These results are cached.
    """
    from django.contrib.gis.gdal import SpatialReference
    try:
        # The SpatialRefSys model for the spatial backend.
        SpatialRefSys = connection.ops.spatial_ref_sys()
    except NotImplementedError:
        SpatialRefSys = None

    if SpatialRefSys:
        # Look the SRID up in the backend's spatial_ref_sys table.
        alias = connection.alias
        get_srs = lambda srid: SpatialRefSys.objects.using(connection.alias).get(srid=srid).srs
    else:
        # No table available: fall back to GDAL's own SRS database.
        alias = None
        get_srs = SpatialReference

    cache = _srid_cache[alias]
    if srid not in cache:
        srs = get_srs(srid)
        units, units_name = srs.units
        spheroid = 'SPHEROID["%s",%s,%s]' % (
            srs['spheroid'], srs.semi_major, srs.inverse_flattening)
        cache[srid] = (units, units_name, spheroid)
    return cache[srid]
class GeoSelectFormatMixin:
    """Mixin adding backend-aware SQL formatting for spatial SELECT columns."""

    def select_format(self, compiler, sql, params):
        """
        Return the selection format string, depending on the requirements
        of the spatial backend. For example, Oracle and MySQL require custom
        selection formats in order to retrieve geometries in OGC WKT. For all
        other fields, return a simple '%s' format string.
        """
        connection = compiler.connection
        srid = compiler.query.get_context('transformed_srid')
        # Wrap the column in a transform() call when the query asked for
        # on-the-fly reprojection to another SRID.
        sel_fmt = '%s(%%s, %s)' % (connection.ops.transform, srid) if srid else '%s'
        if connection.ops.select:
            # This allows operations to be done on fields in the SELECT,
            # overriding their values -- used by the Oracle and MySQL
            # spatial backends to get database values as WKT, and by the
            # `transform` method.
            sel_fmt = connection.ops.select % sel_fmt
        return sel_fmt % sql, params
class BaseSpatialField(Field):
    """
    The Base GIS Field.

    It's used as a base class for GeometryField and RasterField. Defines
    properties that are common to all GIS fields such as the characteristics
    of the spatial reference system of the field.
    """
    description = _("The base GIS field.")
    empty_strings_allowed = False

    # Geodetic units.
    geodetic_units = ('decimal degree', 'degree')

    def __init__(self, verbose_name=None, srid=4326, spatial_index=True, **kwargs):
        """
        The initialization function for base spatial fields. Takes the following
        as keyword arguments:

        srid:
         The spatial reference system identifier, an OGC standard.
         Defaults to 4326 (WGS84).

        spatial_index:
         Indicates whether to create a spatial index. Defaults to True.
         Set this instead of 'db_index' for geographic fields since index
         creation is different for geometry columns.
        """
        # Setting the index flag with the value of the `spatial_index` keyword.
        self.spatial_index = spatial_index

        # Setting the SRID and getting the units. Unit information must be
        # easily available in the field instance for distance queries.
        self.srid = srid

        # Setting the verbose_name keyword argument with the positional
        # first parameter, so this works like normal fields.
        kwargs['verbose_name'] = verbose_name

        super().__init__(**kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super().deconstruct()
        # Always include SRID for less fragility; include spatial index if it's
        # not the default value.
        kwargs['srid'] = self.srid
        if self.spatial_index is not True:
            kwargs['spatial_index'] = self.spatial_index
        return name, path, args, kwargs

    def db_type(self, connection):
        # The column type is backend-specific (geometry/geography/raster).
        return connection.ops.geo_db_type(self)

    # The following functions are used to get the units, their name, and
    # the spheroid corresponding to the SRID of the BaseSpatialField.
    # The values are fetched lazily, on first use, and memoized on the
    # instance (_units / _units_name / _spheroid).
    def _get_srid_info(self, connection):
        # Get attributes from `get_srid_info`.
        self._units, self._units_name, self._spheroid = get_srid_info(self.srid, connection)

    def spheroid(self, connection):
        if not hasattr(self, '_spheroid'):
            self._get_srid_info(connection)
        return self._spheroid

    def units(self, connection):
        if not hasattr(self, '_units'):
            self._get_srid_info(connection)
        return self._units

    def units_name(self, connection):
        if not hasattr(self, '_units_name'):
            self._get_srid_info(connection)
        return self._units_name

    def geodetic(self, connection):
        """
        Return true if this field's SRID corresponds with a coordinate
        system that uses non-projected units (e.g., latitude/longitude).
        """
        units_name = self.units_name(connection)
        # When the backend has no unit info for the SRID, fall back to
        # treating only WGS84 (4326) as geodetic.
        return units_name.lower() in self.geodetic_units if units_name else self.srid == 4326

    def get_placeholder(self, value, compiler, connection):
        """
        Return the placeholder for the spatial column for the
        given value.
        """
        return connection.ops.get_geom_placeholder(self, value, compiler)

    def get_srid(self, obj):
        """
        Return the default SRID for the given geometry or raster, taking into
        account the SRID set for the field. For example, if the input geometry
        or raster doesn't have an SRID, then the SRID of the field will be
        returned.
        """
        srid = obj.srid  # SRID of given geometry.
        if srid is None or self.srid == -1 or (srid == -1 and self.srid != -1):
            return self.srid
        else:
            return srid

    def get_db_prep_save(self, value, connection):
        """
        Prepare the value for saving in the database.
        """
        # Empty/None values are stored as NULL.
        if isinstance(value, Geometry) or value:
            return connection.ops.Adapter(self.get_prep_value(value))
        else:
            return None

    def get_raster_prep_value(self, value, is_candidate):
        """
        Return a GDALRaster if conversion is successful, otherwise return None.
        """
        if isinstance(value, gdal.GDALRaster):
            return value
        elif is_candidate:
            # String-like candidate: a failed conversion is not an error
            # here, because the caller will still try Geometry().
            try:
                return gdal.GDALRaster(value)
            except GDALException:
                pass
        elif isinstance(value, dict):
            # A dict can only be a raster; failure is a hard error.
            try:
                return gdal.GDALRaster(value)
            except GDALException:
                raise ValueError("Couldn't create spatial object from lookup value '%s'." % value)

    def get_prep_value(self, value):
        """
        Spatial lookup values are either a parameter that is (or may be
        converted to) a geometry or raster, or a sequence of lookup values
        that begins with a geometry or raster. Set up the geometry or raster
        value properly and preserves any other lookup parameters.
        """
        value = super().get_prep_value(value)
        # For IsValid lookups, boolean values are allowed.
        if isinstance(value, (Expression, bool)):
            return value
        elif isinstance(value, (tuple, list)):
            # Sequence form: first element is the spatial object, the rest
            # are extra lookup parameters preserved as-is.
            obj = value[0]
            seq_value = True
        else:
            obj = value
            seq_value = False

        # When the input is not a geometry or raster, attempt to construct one
        # from the given string input.
        if isinstance(obj, Geometry):
            pass
        else:
            # Check if input is a candidate for conversion to raster or geometry.
            is_candidate = isinstance(obj, (bytes, str)) or hasattr(obj, '__geo_interface__')
            # Try to convert the input to raster.
            raster = self.get_raster_prep_value(obj, is_candidate)

            if raster:
                obj = raster
            elif is_candidate:
                try:
                    obj = Geometry(obj)
                except (GeometryException, GDALException):
                    raise ValueError("Couldn't create spatial object from lookup value '%s'." % obj)
            else:
                raise ValueError('Cannot use object with type %s for a spatial lookup parameter.' % type(obj).__name__)

        # Assigning the SRID value.
        obj.srid = self.get_srid(obj)

        if seq_value:
            lookup_val = [obj]
            lookup_val.extend(value[1:])
            return tuple(lookup_val)
        else:
            return obj
# Register every GIS lookup (contains, intersects, distance_lt, ...) on the
# base field class so that all spatial field types inherit them.
for klass in gis_lookups.values():
    BaseSpatialField.register_lookup(klass)
class GeometryField(GeoSelectFormatMixin, BaseSpatialField):
    """
    The base Geometry field -- maps to the OpenGIS Specification Geometry type.
    """
    description = _("The base Geometry field -- maps to the OpenGIS Specification Geometry type.")
    form_class = forms.GeometryField
    # The OpenGIS Geometry name.
    geom_type = 'GEOMETRY'

    def __init__(self, verbose_name=None, dim=2, geography=False, *, extent=(-180.0, -90.0, 180.0, 90.0),
                 tolerance=0.05, **kwargs):
        """
        The initialization function for geometry fields. In addition to the
        parameters from BaseSpatialField, it takes the following as keyword
        arguments:

        dim:
         The number of dimensions for this geometry. Defaults to 2.

        extent:
         Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
         geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
         to (-180.0, -90.0, 180.0, 90.0).

        tolerance:
         Define the tolerance, in meters, to use for the geometry field
         entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
        """
        # Setting the dimension of the geometry field.
        self.dim = dim

        # Is this a geography rather than a geometry column?
        self.geography = geography

        # Oracle-specific private attributes for creating the entry in
        # `USER_SDO_GEOM_METADATA`
        self._extent = extent
        self._tolerance = tolerance

        super().__init__(verbose_name=verbose_name, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super().deconstruct()
        # Include kwargs if they're not the default values.
        if self.dim != 2:
            kwargs['dim'] = self.dim
        if self.geography is not False:
            kwargs['geography'] = self.geography
        return name, path, args, kwargs

    # ### Routines specific to GeometryField ###
    def get_distance(self, value, lookup_type, connection):
        """
        Return a distance number in units of the field. For example, if
        `D(km=1)` was passed in and the units of the field were in meters,
        then 1000 would be returned.
        """
        return connection.ops.get_distance(self, value, lookup_type)

    def get_db_prep_value(self, value, connection, *args, **kwargs):
        # Pass geography=True through to the adapter so backends can quote
        # geography-column values appropriately.
        return connection.ops.Adapter(
            super().get_db_prep_value(value, connection, *args, **kwargs),
            **({'geography': True} if self.geography else {})
        )

    def from_db_value(self, value, expression, connection, context):
        if value:
            # Coerce raw database output (e.g. WKB/WKT) into a Geometry.
            if not isinstance(value, Geometry):
                value = Geometry(value)
            srid = value.srid
            # Backfill the field's SRID when the database value has none.
            if not srid and self.srid != -1:
                value.srid = self.srid
        return value

    # ### Routines overloaded from Field ###
    def contribute_to_class(self, cls, name, **kwargs):
        super().contribute_to_class(cls, name, **kwargs)

        # Setup for lazy-instantiated Geometry object.
        setattr(cls, self.attname, SpatialProxy(Geometry, self))

    def formfield(self, **kwargs):
        defaults = {'form_class': self.form_class,
                    'geom_type': self.geom_type,
                    'srid': self.srid,
                    }
        defaults.update(kwargs)
        # 3D geometries need a widget that supports the extra dimension;
        # fall back to a plain Textarea when the default widget cannot.
        if (self.dim > 2 and 'widget' not in kwargs and
                not getattr(defaults['form_class'].widget, 'supports_3d', False)):
            defaults['widget'] = forms.Textarea
        return super().formfield(**defaults)
# The OpenGIS Geometry Type Fields
class PointField(GeometryField):
    """Geometry field restricted to OGC POINT values."""
    geom_type = 'POINT'
    form_class = forms.PointField
    description = _("Point")
class LineStringField(GeometryField):
    """Geometry field restricted to OGC LINESTRING values."""
    geom_type = 'LINESTRING'
    form_class = forms.LineStringField
    description = _("Line string")
class PolygonField(GeometryField):
    """Geometry field restricted to OGC POLYGON values."""
    geom_type = 'POLYGON'
    form_class = forms.PolygonField
    description = _("Polygon")
class MultiPointField(GeometryField):
    """Geometry field restricted to OGC MULTIPOINT values."""
    geom_type = 'MULTIPOINT'
    form_class = forms.MultiPointField
    description = _("Multi-point")
class MultiLineStringField(GeometryField):
    """Geometry field restricted to OGC MULTILINESTRING values."""
    geom_type = 'MULTILINESTRING'
    form_class = forms.MultiLineStringField
    description = _("Multi-line string")
class MultiPolygonField(GeometryField):
    """Geometry field restricted to OGC MULTIPOLYGON values."""
    geom_type = 'MULTIPOLYGON'
    form_class = forms.MultiPolygonField
    description = _("Multi polygon")
class GeometryCollectionField(GeometryField):
    """Geometry field restricted to OGC GEOMETRYCOLLECTION values."""
    geom_type = 'GEOMETRYCOLLECTION'
    form_class = forms.GeometryCollectionField
    description = _("Geometry collection")
class ExtentField(GeoSelectFormatMixin, Field):
    "Used as a return value from an extent aggregate"
    description = _("Extent Aggregate Field")

    def get_internal_type(self):
        # Identifies this pseudo-field to the backend; it maps to an
        # aggregate result, not a real table column.
        return "ExtentField"
class RasterField(BaseSpatialField):
    """
    Raster field for GeoDjango -- evaluates into GDALRaster objects.
    """
    description = _("Raster Field")
    geom_type = 'RASTER'
    geography = False

    def _check_connection(self, connection):
        # Make sure raster fields are used only on backends with raster support.
        if not connection.features.gis_enabled or not connection.features.supports_raster:
            raise ImproperlyConfigured('Raster fields require backends with raster support.')

    def db_type(self, connection):
        self._check_connection(connection)
        return super().db_type(connection)

    def from_db_value(self, value, expression, connection, context):
        # The backend knows its own raster wire format; let it parse.
        return connection.ops.parse_raster(value)

    def get_db_prep_value(self, value, connection, prepared=False):
        self._check_connection(connection)
        # Prepare raster for writing to database.
        if not prepared:
            value = connection.ops.deconstruct_raster(value)
        return super().get_db_prep_value(value, connection, prepared)

    def contribute_to_class(self, cls, name, **kwargs):
        super().contribute_to_class(cls, name, **kwargs)
        # Setup for lazy-instantiated Raster object. For large querysets, the
        # instantiation of all GDALRasters can potentially be expensive. This
        # delays the instantiation of the objects to the moment of evaluation
        # of the raster attribute.
        setattr(cls, self.attname, SpatialProxy(gdal.GDALRaster, self))

    def get_transform(self, name):
        # An all-digit transform name (e.g. rast__2__...) selects a raster
        # band; build a band-specific transform class on the fly.
        try:
            band_index = int(name)
            return type(
                'SpecificRasterBandTransform',
                (RasterBandTransform, ),
                {'band_index': band_index}
            )
        except ValueError:
            pass
        return super().get_transform(name)
| bsd-3-clause |
Phelimb/atlas | scripts/json_to_tsv.py | 1 | 6201 | #! /usr/bin/env python
# This script is intended to load a JSON dict containing resistotypes,
# a list of comids and a list of drugs of interest. It will return a column for each drug,
# Where 1 = R, 0 S, 0 unknown.
import argparse
import json
import csv
import os
parser = argparse.ArgumentParser(description='''load a JSON dict containing resistotypes,
a list of comids and a list of drugs of interest. It will return a column for each drug,
Where 1 = R, 0 S, 0 unknown. ''')
parser.add_argument('--format', type=str,
help='--format', default="long")
parser.add_argument('files', type=str, nargs='+',
help='files')
args = parser.parse_args()
def load_json(f):
    """Parse the JSON file at path *f* and return the resulting object."""
    with open(f, 'r') as infile:
        return json.load(infile)
def get_drugs(drug_list):
    """Return *drug_list* with duplicates removed, preserving first-seen order.

    Fix: the original also re-read every input JSON file on each call
    (``for f in args.files: d = load_json(f)``) and then discarded the
    parsed result -- `d` was never used.  That dead I/O loop has been
    removed; the function is now a pure order-preserving de-duplication.
    """
    drugs = []
    for drug in drug_list:
        if drug not in drugs:
            drugs.append(drug)
    return drugs
def get_phylo_group_string(d):
    """Summarize the 'phylo_group' level of the phylogenetics section of *d*.

    Returns a 3-tuple of ';'-joined strings: (names, percent_coverages,
    median_depths), one entry per phylo group.  Missing values render as
    the string 'None'.

    Fix: ``.iteritems()`` was removed in Python 3 (AttributeError);
    ``.items()`` behaves identically here and works on Python 2 and 3.
    """
    s = []
    depth = []
    per_cov = []
    for k, v in d.get("phylogenetics", {}).get("phylo_group", {}).items():
        s.append(k)
        depth.append(str(v.get("median_depth")))
        per_cov.append(str(v.get("percent_coverage")))
    return ";".join(s), ";".join(per_cov), ";".join(depth)
def get_species_string(d):
    """Summarize the 'species' level of the phylogenetics section of *d*.

    Returns a 3-tuple of ';'-joined strings: (names, percent_coverages,
    median_depths), one entry per species.

    Fix: ``.iteritems()`` was removed in Python 3 (AttributeError);
    ``.items()`` behaves identically here and works on Python 2 and 3.
    """
    s = []
    depth = []
    per_cov = []
    for k, v in d.get("phylogenetics", {}).get("species", {}).items():
        s.append(k)
        depth.append(str(v.get("median_depth")))
        per_cov.append(str(v.get("percent_coverage")))
    return ";".join(s), ";".join(per_cov), ";".join(depth)
def get_lineage_string(d):
    """Summarize the 'lineage' level of the phylogenetics section of *d*.

    Returns a 3-tuple of ';'-joined strings: (names, percent_coverages,
    median_depths), one entry per lineage.

    Fix: ``.iteritems()`` was removed in Python 3 (AttributeError);
    ``.items()`` behaves identically here and works on Python 2 and 3.
    """
    s = []
    depth = []
    per_cov = []
    for k, v in d.get("phylogenetics", {}).get("lineage", {}).items():
        s.append(k)
        depth.append(str(v.get("median_depth")))
        per_cov.append(str(v.get("percent_coverage")))
    return ";".join(s), ";".join(per_cov), ";".join(depth)
def get_file_name(f):
    """Return the base name of *f*, truncated at the first '.'."""
    base = os.path.basename(f)
    return base.split('.')[0]
def get_sample_name(f):
    """Return the sample name: the next-to-last '/'-separated path component."""
    parts = f.split('/')
    return parts[-2]
def get_plate_name(f):
    """Return the plate name: the third-from-last '/'-separated path component."""
    parts = f.split('/')
    return parts[-3]
def get_expected_depth(d):
    """Return the expected depth from *d* as a string; "-1" when absent."""
    depth = d.get("expected_depth", -1)
    return str(depth)
def get_mean_read_length(d):
    """Return the mean read length from *d* as a string; "-1" when absent."""
    length = d.get("mean_read_length", -1)
    return str(length)
# def get_called_genes(d, drug=None):
# genes = []
# for gene, coverage in d.get("called_genes", {}).iteritems():
# if coverage.get("induced_resistance") == drug:
# genes.append(":".join([gene,
# str(coverage.get("per_cov")),
# str(coverage.get('median_cov'))]))
# return ";".join(genes)
def get_variant_calls(d):
    """Render variant calls in *d* as 'name:alt_depth:wt_depth;...'.

    A fork's depth is forced to 0 when its percent coverage is below 100,
    i.e. the allele is not considered fully supported.
    """
    rendered = []
    for variant_name, variant_call in d.items():
        coverage = variant_call.get('info', {}).get('coverage', {})
        reference = coverage.get('reference', {})
        alternate = coverage.get('alternate', {})

        wt_depth = reference.get('median_depth')
        alt_depth = alternate.get('median_depth')

        # Zero out depths for incompletely covered alleles.
        if reference.get('percent_coverage') < 100:
            wt_depth = 0
        if alternate.get('percent_coverage') < 100:
            alt_depth = 0

        rendered.append(
            ':'.join([variant_name, str(int(alt_depth)), str(int(wt_depth))]))
    return ';'.join(rendered)
# --- Script body: emit one tab-separated row per (sample file, drug). ---
# NOTE: this section uses Python 2 `print` statements; the script is
# Python 2 only as written.
if args.format == "long":
    header = [
        "file",
        # "plate_name",
        # "sample",
        "drug",
        "phylo_group",
        "species",
        "lineage",
        "phylo_group_per_covg",
        "species_per_covg",
        "lineage_per_covg",
        "phylo_group_depth",
        "species_depth",
        "lineage_depth",
        "susceptibility",
        "variants (prot_mut-ref_mut:alt_depth:wt_depth)"]
    print "\t".join(header)
    rows = []  # NOTE(review): never populated; rows are printed directly.
    for i, f in enumerate(args.files):
        # Unparseable or empty JSON is tolerated and treated as no data.
        try:
            d = load_json(f)
        except ValueError:
            d = {}
        # Each top-level key of the report dict is a sample-file entry.
        for file in d.keys():
            phylo_group, phylo_group_per_covg, phylo_group_depth = get_phylo_group_string(d[
                file])
            species, species_per_covg, species_depth = get_species_string(d[
                file])
            lineage, lineage_per_covg, lineage_depth = get_lineage_string(d[
                file])
            # sample_name = get_sample_name(f)
            # plate_name = get_plate_name(f)
            drug_list = sorted(d[file].get('susceptibility', {}).keys())
            drugs = sorted(drug_list)
            if not drugs:
                # Emit a placeholder row so the sample is not dropped.
                drugs = ["NA"]
            for drug in drugs:
                call = d[file].get('susceptibility', {}).get(drug, {})
                called_by = get_variant_calls(call.get("called_by", {}))
                row = [
                    file,
                    # plate_name,
                    # sample_name,
                    drug,
                    phylo_group,
                    species,
                    lineage,
                    phylo_group_per_covg,
                    species_per_covg,
                    lineage_per_covg,
                    phylo_group_depth,
                    species_depth,
                    lineage_depth,
                    call.get(
                        "predict",
                        'N'),
                    called_by]
                # rows.append(row)
                print "\t".join(row)
else:
    # Only the "long" format is implemented; any other value crashes with
    # ZeroDivisionError.  TODO(review): raise a descriptive error instead.
    0 / 0
| mit |
agaffney/ansible | lib/ansible/module_utils/facts/system/platform.py | 66 | 4075 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import socket
import platform
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector
# i86pc is a Solaris and derivatives-ism
SOLARIS_I86_RE_PATTERN = r'i([3456]86|86pc)'
solaris_i86_re = re.compile(SOLARIS_I86_RE_PATTERN)
class PlatformFactCollector(BaseFactCollector):
    """Collect host platform facts: system, kernel, machine, architecture, etc."""

    name = 'platform'
    _fact_ids = set(['system',
                     'kernel',
                     'kernel_version',
                     'machine',
                     'python_version',
                     'architecture',
                     'machine_id'])

    def collect(self, module=None, collected_facts=None):
        """Return a dict of platform facts for the current host."""
        facts = {}

        # platform.system() can be Linux, Darwin, Java, or Windows
        facts['system'] = platform.system()
        facts['kernel'] = platform.release()
        facts['kernel_version'] = platform.version()
        facts['machine'] = platform.machine()
        facts['python_version'] = platform.python_version()

        fqdn = socket.getfqdn()
        node = platform.node()
        facts['fqdn'] = fqdn
        facts['hostname'] = node.split('.')[0]
        facts['nodename'] = node
        facts['domain'] = '.'.join(fqdn.split('.')[1:])

        bits = platform.architecture()[0].replace('bit', '')
        facts['userspace_bits'] = bits

        # Map pointer width to the conventional userspace arch name.
        userspace_by_bits = {'64': 'x86_64', '32': 'i386'}
        if facts['machine'] == 'x86_64':
            facts['architecture'] = facts['machine']
            if bits in userspace_by_bits:
                facts['userspace_architecture'] = userspace_by_bits[bits]
        elif solaris_i86_re.search(facts['machine']):
            # Solaris reports i86pc / i386-style machine names.
            facts['architecture'] = 'i386'
            if bits in userspace_by_bits:
                facts['userspace_architecture'] = userspace_by_bits[bits]
        else:
            facts['architecture'] = facts['machine']

        if facts['system'] == 'AIX':
            # Attempt to use getconf to figure out architecture
            # fall back to bootinfo if needed
            getconf_bin = module.get_bin_path('getconf')
            if getconf_bin:
                rc, out, err = module.run_command([getconf_bin, 'MACHINE_ARCHITECTURE'])
                facts['architecture'] = out.splitlines()[0]
            else:
                bootinfo_bin = module.get_bin_path('bootinfo')
                rc, out, err = module.run_command([bootinfo_bin, '-p'])
                facts['architecture'] = out.splitlines()[0]
        elif facts['system'] == 'OpenBSD':
            facts['architecture'] = platform.uname()[5]

        machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id")
        if machine_id:
            facts["machine_id"] = machine_id.splitlines()[0]

        return facts
| gpl-3.0 |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/prompt_toolkit/key_binding/bindings/search.py | 3 | 2595 | """
Search related key bindings.
"""
from prompt_toolkit import search
from prompt_toolkit.application.current import get_app
from prompt_toolkit.filters import Condition, control_is_searchable, is_searching
from prompt_toolkit.key_binding.key_processor import KeyPressEvent
from ..key_bindings import key_binding
__all__ = [
"abort_search",
"accept_search",
"start_reverse_incremental_search",
"start_forward_incremental_search",
"reverse_incremental_search",
"forward_incremental_search",
"accept_search_and_accept_input",
]
E = KeyPressEvent
@key_binding(filter=is_searching)
def abort_search(event: E) -> None:
    """
    Abort an incremental search and restore the original
    line.
    (Usually bound to ControlG/ControlC.)
    """
    # Discard the search state and return focus to the searched buffer.
    search.stop_search()
@key_binding(filter=is_searching)
def accept_search(event: E) -> None:
    """
    When enter pressed in isearch, quit isearch mode. (Multiline
    isearch would be too complicated.)
    (Usually bound to Enter.)
    """
    # Keep the match position and leave search mode.
    search.accept_search()
@key_binding(filter=control_is_searchable)
def start_reverse_incremental_search(event: E) -> None:
    """
    Enter reverse incremental search.
    (Usually ControlR.)
    """
    search.start_search(direction=search.SearchDirection.BACKWARD)
@key_binding(filter=control_is_searchable)
def start_forward_incremental_search(event: E) -> None:
    """
    Enter forward incremental search.
    (Usually ControlS.)
    """
    search.start_search(direction=search.SearchDirection.FORWARD)
@key_binding(filter=is_searching)
def reverse_incremental_search(event: E) -> None:
    """
    Apply reverse incremental search, but keep search buffer focused.
    """
    # event.arg carries the numeric repeat count (default 1).
    search.do_incremental_search(search.SearchDirection.BACKWARD, count=event.arg)
@key_binding(filter=is_searching)
def forward_incremental_search(event: E) -> None:
    """
    Apply forward incremental search, but keep search buffer focused.
    """
    # event.arg carries the numeric repeat count (default 1).
    search.do_incremental_search(search.SearchDirection.FORWARD, count=event.arg)
@Condition
def _previous_buffer_is_returnable() -> bool:
    """
    True if the previously focused buffer has a return handler.
    """
    # The buffer being searched, not the search buffer itself.
    prev_control = get_app().layout.search_target_buffer_control
    return bool(prev_control and prev_control.buffer.is_returnable)
@key_binding(filter=is_searching & _previous_buffer_is_returnable)
def accept_search_and_accept_input(event: E) -> None:
    """
    Accept the search operation first, then accept the input.
    """
    search.accept_search()
    # After accepting the search, submit the (now focused) target buffer.
    event.current_buffer.validate_and_handle()
| gpl-2.0 |
supunkamburugamuva/course-builder | modules/announcements/announcements.py | 2 | 11231 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to create and manage Announcements."""
__author__ = 'Saifu Angto (saifu@google.com)'
import datetime
import urllib
from controllers.utils import BaseHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import ReflectiveRequestHandler
from controllers.utils import XsrfTokenManager
from models import entities
from models import roles
from models import transforms
from models.models import MemcacheManager
import models.transforms as transforms
import modules.announcements.samples as samples
from modules.oeditor import oeditor
from google.appengine.ext import db
class AnnouncementsRights(object):
    """Access-control policy for announcements: who may view, add, edit, delete."""
    @classmethod
    def can_view(cls, unused_handler):
        # Everyone may see the announcements list (drafts are filtered later).
        return True
    @classmethod
    def can_edit(cls, handler):
        # Only course admins may modify announcements.
        return roles.Roles.is_course_admin(handler.app_context)
    @classmethod
    def can_delete(cls, handler):
        # Deleting requires the same privilege as editing.
        return cls.can_edit(handler)
    @classmethod
    def can_add(cls, handler):
        # Adding requires the same privilege as editing.
        return cls.can_edit(handler)
    @classmethod
    def apply_rights(cls, handler, items):
        """Filter out items that current user can't see."""
        if AnnouncementsRights.can_edit(handler):
            return items
        # Non-editors only get published (non-draft) announcements.
        return [item for item in items if not item.is_draft]
class AnnouncementsHandler(BaseHandler, ReflectiveRequestHandler):
    """Handler for the announcements list page and its add/edit/delete actions."""
    # ReflectiveRequestHandler dispatches requests to get_<action>/post_<action>
    # methods; these lists whitelist the allowed actions.
    default_action = 'list'
    get_actions = [default_action, 'edit']
    post_actions = ['add', 'delete']
    @classmethod
    def get_child_routes(cls):
        """Add child handlers for REST."""
        return [('/rest/announcements/item', AnnouncementsItemRESTHandler)]
    def get_action_url(self, action, key=None):
        # Build a course-relative URL for an action, optionally scoped to a
        # specific announcement entity key.
        args = {'action': action}
        if key:
            args['key'] = key
        return self.canonicalize_url(
            '/announcements?%s' % urllib.urlencode(args))
    def format_items_for_template(self, items):
        """Formats a list of entities into template values."""
        template_items = []
        for item in items:
            item = transforms.entity_to_dict(item)
            # add 'edit' actions
            if AnnouncementsRights.can_edit(self):
                item['edit_action'] = self.get_action_url(
                    'edit', key=item['key'])
                item['delete_xsrf_token'] = self.create_xsrf_token('delete')
                item['delete_action'] = self.get_action_url(
                    'delete', key=item['key'])
            template_items.append(item)
        output = {}
        output['children'] = template_items
        # add 'add' action
        if AnnouncementsRights.can_edit(self):
            output['add_xsrf_token'] = self.create_xsrf_token('add')
            output['add_action'] = self.get_action_url('add')
        return output
    def put_sample_announcements(self):
        """Loads sample data into a database."""
        items = []
        for item in samples.SAMPLE_ANNOUNCEMENTS:
            entity = AnnouncementEntity()
            transforms.dict_to_entity(entity, item)
            entity.put()
            items.append(entity)
        return items
    def get_list(self):
        """Shows a list of announcements."""
        if not self.personalize_page_and_get_enrolled():
            return
        items = AnnouncementEntity.get_announcements()
        # Seed an empty course with the sample announcements, admins only.
        if not items and AnnouncementsRights.can_edit(self):
            items = self.put_sample_announcements()
        items = AnnouncementsRights.apply_rights(self, items)
        self.template_value['announcements'] = self.format_items_for_template(
            items)
        self.template_value['navbar'] = {'announcements': True}
        self.render('announcements.html')
    def get_edit(self):
        """Shows an editor for an announcement."""
        if not AnnouncementsRights.can_edit(self):
            self.error(401)
            return
        key = self.request.get('key')
        exit_url = self.canonicalize_url(
            '/announcements#%s' % urllib.quote(key, safe=''))
        rest_url = self.canonicalize_url('/rest/announcements/item')
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            AnnouncementsItemRESTHandler.SCHEMA_JSON,
            AnnouncementsItemRESTHandler.SCHEMA_ANNOTATIONS_DICT,
            key, rest_url, exit_url,
            required_modules=AnnouncementsItemRESTHandler.REQUIRED_MODULES)
        self.template_value['navbar'] = {'announcements': True}
        self.template_value['content'] = form_html
        self.render('bare.html')
    def post_delete(self):
        """Deletes an announcement."""
        if not AnnouncementsRights.can_delete(self):
            self.error(401)
            return
        key = self.request.get('key')
        entity = AnnouncementEntity.get(key)
        if entity:
            entity.delete()
        self.redirect('/announcements')
    def post_add(self):
        """Adds a new announcement and redirects to an editor for it."""
        if not AnnouncementsRights.can_add(self):
            self.error(401)
            return
        # Create a placeholder draft and send the admin to the editor.
        entity = AnnouncementEntity()
        entity.title = 'Sample Announcement'
        entity.date = datetime.datetime.now().date()
        entity.html = 'Here is my announcement!'
        entity.is_draft = True
        entity.put()
        self.redirect(self.get_action_url('edit', key=entity.key()))
class AnnouncementsItemRESTHandler(BaseRESTHandler):
    """Provides REST API for an announcement."""
    # TODO(psimakov): we should really use an ordered dictionary, not plain
    # text; it can't be just a normal dict because a dict iterates its items in
    # undefined order; thus when we render a dict to JSON an order of fields
    # will not match what we specify here; the final editor will also show the
    # fields in an undefined order; for now we use the raw JSON, rather than the
    # dict, but will move to an ordered dict late.
    SCHEMA_JSON = """
    {
        "id": "Announcement Entity",
        "type": "object",
        "description": "Announcement",
        "properties": {
            "key" : {"type": "string"},
            "title": {"optional": true, "type": "string"},
            "date": {"optional": true, "type": "date"},
            "html": {"optional": true, "type": "html"},
            "is_draft": {"type": "boolean"}
            }
    }
    """
    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
    # inputex specific schema annotations to control editor look and feel
    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Announcement'),
        (['properties', 'key', '_inputex'], {
            'label': 'ID', '_type': 'uneditable'}),
        (['properties', 'date', '_inputex'], {
            'label': 'Date', '_type': 'date', 'dateFormat': 'Y/m/d',
            'valueFormat': 'Y/m/d'}),
        (['properties', 'title', '_inputex'], {'label': 'Title'}),
        (['properties', 'html', '_inputex'], {
            'label': 'Body', '_type': 'html', 'editorType': 'simple'}),
        oeditor.create_bool_select_annotation(
            ['properties', 'is_draft'], 'Status', 'Draft', 'Published')]
    # Client-side inputEx modules the editor page must load for this schema.
    REQUIRED_MODULES = [
        'inputex-date', 'gcb-rte', 'inputex-select', 'inputex-string',
        'inputex-uneditable']
    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        key = self.request.get('key')
        try:
            entity = AnnouncementEntity.get(key)
        except db.BadKeyError:
            # A malformed datastore key is reported as 'not found' below.
            entity = None
        if not entity:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return
        # Reuse the list filter to decide whether this single item is visible.
        viewable = AnnouncementsRights.apply_rights(self, [entity])
        if not viewable:
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        entity = viewable[0]
        json_payload = transforms.dict_to_json(transforms.entity_to_dict(
            entity), AnnouncementsItemRESTHandler.SCHEMA_DICT)
        # The response carries the XSRF token the client must echo on PUT.
        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'announcement-put'))
    def put(self):
        """Handles REST PUT verb with JSON payload."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')
        # Reject the request when the XSRF token is missing or invalid.
        if not self.assert_xsrf_token_or_fail(
                request, 'announcement-put', {'key': key}):
            return
        if not AnnouncementsRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        entity = AnnouncementEntity.get(key)
        if not entity:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return
        payload = request.get('payload')
        transforms.dict_to_entity(entity, transforms.json_to_dict(
            transforms.loads(payload),
            AnnouncementsItemRESTHandler.SCHEMA_DICT))
        entity.put()
        transforms.send_json_response(self, 200, 'Saved.')
class AnnouncementEntity(entities.BaseEntity):
    """A class that represents a persistent database entity of announcement."""
    title = db.StringProperty(indexed=False)
    date = db.DateProperty()
    html = db.TextProperty(indexed=False)
    is_draft = db.BooleanProperty()
    # Single memcache key under which the full announcements list is cached.
    memcache_key = 'announcements'
    @classmethod
    def get_announcements(cls, allow_cached=True):
        """Return announcements newest-first, from memcache when allowed."""
        items = MemcacheManager.get(cls.memcache_key)
        if not allow_cached or items is None:
            items = AnnouncementEntity.all().order('-date').fetch(1000)
            # TODO(psimakov): prepare to exceed 1MB max item size
            # read more here: http://stackoverflow.com
            # /questions/5081502/memcache-1-mb-limit-in-google-app-engine
            MemcacheManager.set(cls.memcache_key, items)
        return items
    def put(self):
        """Do the normal put() and also invalidate memcache."""
        result = super(AnnouncementEntity, self).put()
        MemcacheManager.delete(self.memcache_key)
        return result
    def delete(self):
        """Do the normal delete() and invalidate memcache."""
        super(AnnouncementEntity, self).delete()
        MemcacheManager.delete(self.memcache_key)
| apache-2.0 |
m1093782566/openstack_org_ceilometer | ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py | 5 | 2743 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE (gordc): this is a copy of 024 migration script which missed pgsql
import sqlalchemy as sa
from ceilometer.storage.sqlalchemy import migration
from ceilometer.storage.sqlalchemy import models
def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False):
    """Convert column *col* of *table* from type *from_t* to *to_t*.

    Works by adding a temporary column of the new type, copying values
    row-by-row in pages, dropping the old column and renaming the temporary
    column back to *col*.  Optionally recreates an index on the column.
    """
    temp_col_n = 'convert_data_type_temp_col'
    # Override column we're going to convert with from_t, since the type we're
    # replacing could be custom and we need to tell SQLALchemy how to perform
    # CRUD operations with it.
    table = sa.Table(table.name, table.metadata, sa.Column(col, from_t),
                     extend_existing=True)
    sa.Column(temp_col_n, to_t).create(table)
    key_attr = getattr(table.c, pk_attr)
    orig_col = getattr(table.c, col)
    new_col = getattr(table.c, temp_col_n)
    # Page through the rows so the whole table is never held in memory.
    query = sa.select([key_attr, orig_col])
    for key, value in migration.paged(query):
        (table.update().where(key_attr == key).values({temp_col_n: value}).
         execute())
    orig_col.drop()
    new_col.alter(name=col)
    if index:
        sa.Index('ix_%s_%s' % (table.name, col), new_col).create()
def upgrade(migrate_engine):
    # Migration 024 missed PostgreSQL; redo its conversion of the float
    # timestamp columns to PreciseTimestamp for that engine only.
    if migrate_engine.name == 'postgresql':
        meta = sa.MetaData(bind=migrate_engine)
        event = sa.Table('event', meta, autoload=True)
        _convert_data_type(event, 'generated', sa.Float(),
                           models.PreciseTimestamp(),
                           pk_attr='id', index=True)
        trait = sa.Table('trait', meta, autoload=True)
        _convert_data_type(trait, 't_datetime', sa.Float(),
                           models.PreciseTimestamp(),
                           pk_attr='id', index=True)
def downgrade(migrate_engine):
    # Inverse of upgrade(): convert the columns back to plain floats on
    # PostgreSQL.
    if migrate_engine.name == 'postgresql':
        meta = sa.MetaData(bind=migrate_engine)
        event = sa.Table('event', meta, autoload=True)
        _convert_data_type(event, 'generated', models.PreciseTimestamp(),
                           sa.Float(), pk_attr='id', index=True)
        trait = sa.Table('trait', meta, autoload=True)
        _convert_data_type(trait, 't_datetime', models.PreciseTimestamp(),
                           sa.Float(), pk_attr='id', index=True)
| apache-2.0 |
hectord/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/webdesign/lorem_ipsum.py | 439 | 4872 | """
Utility functions for generating "lorem ipsum" Latin text.
"""
import random
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
    """
    Return one randomly generated sentence of lorem ipsum text.

    The sentence has 1-5 comma-separated sections of 3-12 words each,
    starts with a capital letter and ends with '.' or '?'.
    """
    section_count = random.randint(1, 5)
    sections = []
    for _ in range(section_count):
        word_count = random.randint(3, 12)
        sections.append(u' '.join(random.sample(WORDS, word_count)))
    body = u', '.join(sections)
    # Capitalize the first character and pick the end punctuation at random.
    return u'%s%s%s' % (body[0].upper(), body[1:], random.choice('?.'))
def paragraph():
    """
    Return one randomly generated paragraph of lorem ipsum text,
    made of 1 to 4 sentences (inclusive).
    """
    sentence_count = random.randint(1, 4)
    return u' '.join(sentence() for _ in range(sentence_count))
def paragraphs(count, common=True):
    """
    Return a list of *count* paragraphs as produced by paragraph().

    When *common* is true the first paragraph is the canonical 'lorem ipsum'
    text; every other paragraph is random Latin.
    """
    result = []
    for index in range(count):
        use_common = common and index == 0
        result.append(COMMON_P if use_common else paragraph())
    return result
def words(count, common=True):
    """
    Return *count* lorem ipsum words joined by single spaces.

    When *common* is true the first 19 words are the standard 'lorem ipsum'
    opening; any further words are drawn randomly (without replacement per
    batch) from WORDS.
    """
    word_list = list(COMMON_WORDS) if common else []
    available = len(word_list)
    if count <= available:
        return u' '.join(word_list[:count])
    remaining = count - available
    while remaining > 0:
        # Sample at most len(WORDS) at a time; repeats only across batches.
        batch = min(remaining, len(WORDS))
        word_list += random.sample(WORDS, batch)
        remaining -= batch
    return u' '.join(word_list)
| gpl-3.0 |
zesk06/scores | scores/play.py | 1 | 6789 | #!/usr/bin/env python
# encoding: utf-8
"""
The Play management
"""
import datetime
from mongokit import Document, DocumentMigration
class PlayMigration(DocumentMigration):
    """Mongokit schema migration for Play documents.

    Each allmigrationNN_* method back-fills one field that was added to the
    Play structure after documents were first written.
    """
    def __init__(self, *args):
        DocumentMigration.__init__(self, *args)
    def allmigration01_add_comment(self):
        """Add the comment field to all documents missing it"""
        self.target = {'comment': {'$exists': False}}  # pylint: disable=W0201
        self.update = {'$set': {'comment': None}}  # pylint: disable=W0201
    def allmigration02_add_reason(self):
        """Add the winners_reason field to all documents missing it"""
        self.target = {'winners_reason': {'$exists': False}}  # pylint: disable=W0201
        self.update = {'$set': {'winners_reason': []}}  # pylint: disable=W0201
    def allmigration03_add_created_by(self):
        """Add the created_by play field"""
        self.target = {'created_by': {'$exists': False}}  # pylint: disable=W0201
        self.update = {'$set': {'created_by': 'migration'}}  # pylint: disable=W0201
class Play(Document):
    """
    A database Play: one recorded game session with its players and scores.
    """
    def __init__(self, *args, **kwargs):
        # Document needs a lot of parameters
        Document.__init__(self, *args, **kwargs)
        # store the elo per player (before, after)
        self.elos_per_player = {}
    # with this you will be able to use
    # play.date = blah
    # play.players = blah2
    use_dot_notation = True
    __collection__ = 'plays'
    structure = {
        'date': datetime.datetime,
        'game': basestring,
        'created_by': basestring,  # who created the play
        'winners': [basestring],  # a forced list of winners
        'winners_reason': [basestring],  # The forced list of winner reason
        'wintype': basestring,  # max or min
        'comment': basestring,  # A play comment
        'players': [
            {
                'login': basestring,
                'score': int,
                'role': basestring,
                'color': basestring,
                'team': basestring,
                'team_color': basestring
            }
        ]
    }
    required_fields = ['date', 'game']
    default_values = {
        'winners': [],
        'players': [],
        'wintype': 'max'
    }
    def set_date(self, date):
        """Set the date
        :type date: datetime.datetime"""
        self['date'] = date
    def set_game(self, game):
        """Set the game
        :type game: basestring"""
        self['game'] = game
    def set_created_by(self, creator):
        """Set the created_by field
        :type creator: basestring"""
        self['created_by'] = creator
    def add_player(self, player_dict):
        """Adds a new player to the play
        :type player_dict: dict
        {
            'login': basestring,
            'score': int,
            'role': basestring,
            'color': basestring,
            'team': basestring,
            'team_color': basestring
        }
        """
        self['players'].append(player_dict)
    @staticmethod
    def create_player(login, score, role=None, team=None):
        """Return a player instance
        suitable to be added using the add_player method
        :rtype: dict"""
        return {
            'login': login,
            'score': score,
            'role': role,
            'color': None,
            'team': team,
            'team_color': None
        }
    def get_player(self, login):
        """Return the player with the given login"""
        for player in self['players']:
            if player['login'] == login:
                return player
        raise ValueError('player with login %s not found' % login)
    def get_player_order(self):
        "return a list of tuple [(score, [players])] ordered per score"
        player_per_score = {}
        for (player, score) in [(player['login'], player['score'])
                                for player in self['players']]:
            if score not in player_per_score:
                player_per_score[score] = []
            player_per_score[score].append(player)
        # 'min' wintype: the lowest score wins, so sort ascending; the
        # default 'max' sorts descending (best score first).
        if hasattr(self, 'wintype') and self['wintype'] == 'min':
            return sorted(player_per_score.items(), key=lambda x: x[0])
        return sorted(player_per_score.items(),
                      key=lambda x: x[0], reverse=True)
    def get_player_position(self, login):
        """Return the position of the player with the given login
        :type login: basestring
        :rtype: int
        """
        # Positions are 1-based; players with equal scores share a position.
        for index, score_players in enumerate(self.get_player_order()):
            players = score_players[1]
            if login in players:
                return index + 1
        raise ValueError('Player with login %s not found in play %s' % (login, self))
    def get_winners(self):
        "return the list of player names that wins the play"
        # An explicitly forced winners list takes precedence over the scores.
        if self['winners'] is not None and \
           isinstance(self['winners'], list) and \
           self['winners'] != []:
            return self['winners']
        elif self['winners'] is not None and not isinstance(self['winners'], list):
            raise TypeError('Expected type for winners is list but found %s' %
                            type(self['winners']))
        # Otherwise the winners are the players holding the best score.
        order = self.get_player_order()
        if order != []:
            return self.get_player_order()[0][1]
        return []
    def get_highest_score(self):
        "return the high score of the play"
        order = self.get_player_order()
        if order != []:
            return order[0][0]
        return 0
    def get_lowest_score(self):
        "return the lowest score of the play"
        order = self.get_player_order()
        if order != []:
            return order[-1][0]
        return 0
    # pylint: disable=C0103
    @property
    def id(self):
        """return the id as a string"""
        return '%s' % self['_id']
    @property
    def is_max(self):
        """Return True if play has a maxtype score"""
        return 'wintype' in self and self['wintype'] == 'max'
    @property
    def teams(self):
        """Return the map of teams
        { name: team_name, players: [...]}
        """
        teams = dict()
        for player in self.players:
            team = player['team']
            if team not in teams:
                teams[team] = []
            teams[team].append(player['login'])
        return teams
    def set_elos(self, elos_per_player):
        """Set the elos per player
        :param elos_per_player: The elos per player where key is player login
        and value is a tuple (elo_pre_play, elo_post_play)
        :type elos_per_player: dict(basestring, tuple(int,int))"""
        self.elos_per_player = elos_per_player
| mit |
patchew-project/patchew | www/urls.py | 1 | 1724 | #!/usr/bin/env python3
#
# Copyright 2016 Red Hat, Inc.
#
# Authors:
# Fam Zheng <famz@redhat.com>
#
# This work is licensed under the MIT License. Please see the LICENSE file or
# http://opensource.org/licenses/MIT.
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from . import views
from mod import dispatch_module_hook
# Module hooks register their URLs first, so module-provided pages take
# precedence over the generic patterns appended below.
urlpatterns = []
dispatch_module_hook("www_url_hook", urlpatterns=urlpatterns)
# Auth pages first, then search, then the project/series/message views whose
# catch-all patterns must come last.
urlpatterns += [
    url("^login/$",
        auth_views.LoginView.as_view(template_name="login.html"),
        name="login"),
    url("^logout/$",
        auth_views.LogoutView.as_view(), name="logout"),
    url("^change-password/$",
        auth_views.PasswordChangeView.as_view(template_name="password-change.html"),
        name="password_change"),
    url("^change-password/done/$",
        auth_views.PasswordChangeDoneView.as_view(template_name="password-change-done.html"),
        name="password_change_done"),
    url(r"^search$", views.view_search, name="search"),
    url(r"^search-help$", views.view_search_help, name="search_help"),
    url(r"^(?P<project>[^/]*)/$", views.view_series_list, name="series_list"),
    url(r"^(?P<project>[^/]*)/info$", views.view_project_detail, name="project_detail"),
    url(
        r"^(?P<project>[^/]*)/(?P<message_id>[^/]*)/$",
        views.view_series_detail,
        name="series_detail",
    ),
    url(
        r"^(?P<project>[^/]*)/(?P<thread_id>[^/]*)/(?P<message_id>[^/]*)/$",
        views.view_series_message,
        name="series_message",
    ),
    url(
        r"^(?P<project>[^/]*)/(?P<message_id>[^/]*)/mbox$", views.view_mbox, name="mbox"
    ),
    url(r"^$", views.view_project_list, name="project_list"),
]
| mit |
Trust-Code/server-tools | base_export_manager/models/__init__.py | 12 | 1190 | # -*- coding: utf-8 -*-
# Python source code encoding : https://www.python.org/dev/peps/pep-0263/
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright :
# (c) 2015 Antiun Ingenieria, SL (Madrid, Spain, http://www.antiun.com)
# Antonio Espinosa <antonioea@antiun.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import ir_exports_line
| agpl-3.0 |
yekaylee/capstone | capstoneRepo/FacialRecognition/FinalFaceRecog_code/picam_version/FaceRecog_trainlocal.py | 2 | 8869 | import sys
import os.path
import fnmatch
import cv2
import numpy as np
import face
import shutil
from picamera import PiCamera
from picamera.array import PiRGBArray
import time
# File the trained LBPH face-recognition model is saved to / loaded from.
TRAINING_FILE='train_LBPH.xml'
# Root directory of the training image set (one sub-folder per person).
BASE_PATH="training/negative"
# Haar cascade used for face detection.
cascadePath = "haarcascade_frontalface_alt.xml"
# Maps label folders to a sample image path (one '<folder>;<image>;' per line).
LOOKUP_FILE='lookup_table.txt'
# One numeric label per line for every enrolled (authorized) person.
ENROLLMENT_FILE='enrollment.txt'
# CSV of '<image path>;<label>' pairs used for training.
CSV_FILE='CSV.txt'
def walk_files(directory, match='*'):
    """Yield the path of every file under *directory*, searched recursively,
    whose basename matches the glob pattern *match*.
    """
    for dirpath, _subdirs, filenames in os.walk(directory):
        for name in fnmatch.filter(filenames, match):
            yield os.path.join(dirpath, name)
def prepare_image(filename):
    """Load *filename* as a grayscale image and resize it to the dimensions
    used for training the face recognition model.
    """
    grayscale = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
    return face.resize(grayscale)
def normalize(X, low, high, dtype=None):
    """Linearly rescale the values of X so they span [low, high].

    Returns a numpy array; when *dtype* is given the result is cast to it.
    A constant input (max == min) triggers a numpy divide-by-zero.
    """
    arr = np.asarray(X)
    lo, hi = np.min(arr), np.max(arr)
    # Map onto [0, 1] first, then stretch and shift into [low, high].
    unit = (arr - float(lo)) / float(hi - lo)
    scaled = unit * (high - low) + low
    if dtype is None:
        return np.asarray(scaled)
    return np.asarray(scaled, dtype=dtype)
#----------------------------------------------------------------------------------------------Load LOOKUP TABLE
def load_table(filename, lookup_table, sample_images):
    """Populate *lookup_table* and *sample_images* in place from a lookup file.

    Each useful line has the form ``<folder>;<image path>;`` (as written by
    create_csv()/LBPHupdate()): field 0 is appended to *lookup_table* and
    field 1 to *sample_images*.
    """
    # Fixes vs. the previous version: the file handle is closed (it was
    # leaked), the file is opened read-only ('r' instead of 'r+'), and blank
    # or malformed lines are skipped instead of raising IndexError.
    with open(filename, 'r') as table_file:
        for line in table_file:
            fields = line.split(';')
            if len(fields) < 2:
                continue
            lookup_table.append(fields[0])
            sample_images.append(fields[1])
#----------------------------------------------------------------------------------------------Create CSV and LOOKUP table
def create_csv():
 """Walk BASE_PATH and (re)write CSV_FILE and LOOKUP_FILE.

 CSV_FILE gets one '<image path>;<label>' line per training image;
 LOOKUP_FILE gets one '<person folder>;<sample image path>;' line per label.
 """
 #if len(sys.argv) != 2:
 #print "usage: create_csv <base_path>"
 #sys.exit(1)
 SEPARATOR=";"
 lookup_table=[]
 f=open(CSV_FILE,'w')
 t=open(LOOKUP_FILE,'w')
 label = 0
 for dirname, dirnames, filenames in os.walk(BASE_PATH):
  for subdirname in dirnames:
   print "!! "+subdirname
   #subject_path = os.path.join(dirname, subdirname)
   subject_path ="%s/%s" % (dirname, subdirname)
   for filename in os.listdir(subject_path):
    # NOTE(review): abs_path uses a backslash separator while abs_path2
    # uses '/' -- both spellings end up in the output files; confirm which
    # one downstream readers actually expect.
    abs_path = "%s\%s" % (subject_path, filename)
    # added to create right directorys in linux
    abs_path2="%s/%s" % (subject_path,filename)
    seq=''.join([str(abs_path),str(SEPARATOR),str(label),'\n'])
    #Sprint seq
    f.write(seq)
    #print "%s%s%d" % (abs_path, SEPARATOR, label)
   label = label + 1
   lookup_table.append(subdirname)
   t.write(''.join([str(subdirname),';',abs_path2,';\n']));
 print lookup_table
 # use lookup_table[label] to look up the specific folder of that label
 f.close()
 t.close()
#--------------------------------------------------------------------------------------------TRAIN THE SYSTEM (RUN ONLY ONCE)
def trainLBPH():
 """Train an LBPH face recognizer from CSV_FILE and save it to TRAINING_FILE.

 Reads '<path>;<label>' lines; for each new label it loads every *.pgm in
 that label's folder, then skips the remaining CSV lines of the same label.
 NOTE(review): assumes all lines of a label are contiguous in the CSV and
 that the on-disk image count matches the CSV -- confirm before reuse.
 """
 faces = []
 labels = []
 labelnum=[]
 # 'temp' remembers the label of the previous CSV line; 10000 = none yet.
 temp=10000
 totalcount=0
 f=open(CSV_FILE,'r+')
 s=f.readline()
 while s!="":
  #print s
  list=s.split(';')
  #print list
  path=str(list[0]).split('\\')
  #print path[0]
  num=str(list[1]).split('\n')
  if temp!=int(num[0]):
   temp=int(num[0])
   print num[0]
   tempcount=0
   labelnum.append(int(num[0]))
   # New label: load every training image in its folder straight from disk.
   for filename in walk_files(path[0],'*.pgm'):
    #print filename
    faces.append(prepare_image(filename))
    labels.append(labelnum[int(num[0])])
    tempcount += 1
    totalcount += 1
  else:
   # Same label again: consume the CSV lines already covered by walk_files;
   # the last line read here belongs to the next label (or is EOF).
   while tempcount > 0:
    s=f.readline()
    tempcount -= 1
    print num[0]+":"+s+"!!"+str(tempcount)+"\n"
   continue
 print 'Read', totalcount, 'images'
 #print np.asarray(labels).shape
 #print np.asarray(faces).shape
 #Train model
 print 'Training model...'
 model = cv2.createLBPHFaceRecognizer()
 model.train(np.asarray(faces), np.asarray(labels))
 #Save model results
 model.save(TRAINING_FILE)
 print 'Training data saved to', TRAINING_FILE
#-------------------------------------------------------------------------------------Enroll and update
def LBPHupdate(ID):
 """Enroll the person whose images are in folder *ID* and update the model.

 Appends the new images to CSV_FILE / LOOKUP_FILE, records the new label in
 ENROLLMENT_FILE and incrementally updates the saved LBPH model.
 """
 labels=[]
 images=[]
 # make sure this is the right file name
 faceCascade = cv2.CascadeClassifier(cascadePath)
 counter=0
 #counter2=0
 foldername=ID;
 #update database
 print 'Loading training data...'
 model=cv2.createLBPHFaceRecognizer()
 model.load(TRAINING_FILE)
 print 'Training data loaded!'
 f=open(CSV_FILE,'r+')
 t=open(LOOKUP_FILE,'r+')
 en=open(ENROLLMENT_FILE,'r+')
 #Get label
 # NOTE(review): seeks 10 bytes back from EOF to re-read the last CSV line
 # and derive the next free label; this assumes the final '<path>;<label>'
 # field fits in those 10 bytes -- confirm for long labels.
 f.seek(-10,2)
 s=f.readline()
 #print s
 list=s.split(';')
 num=str(list[1]).split('\n')
 #new label no.
 label=int(num[0])+1
 #print label
 f.seek(0,2)
 t.seek(0,2)
 en.seek(0,2)
 faces=[]
 labels=[]
 DIRECTORY=foldername
 #print DIRECTORY
 SEPARATOR=";"
 for files in os.listdir(DIRECTORY):
  abs_path="%s\%s"%(DIRECTORY,files)
  seq=''.join([str(abs_path),str(SEPARATOR),str(label),'\n'])
  f.write(seq)
  t.write(''.join([str(DIRECTORY),';',abs_path,';\n']));
  en.write(''.join([str(label),'\n']))
 f.close()
 t.close()
 en.close()
 for filename in walk_files(DIRECTORY,'*.pgm'):
  #print filename
  faces.append(prepare_image(filename))
  labels.append(label)
 # Incremental update: extends the existing model with the new person only.
 model.update(np.asarray(faces), np.asarray(labels))
 #print model
 #Save model results
 model.save(TRAINING_FILE)
 print 'Training data saved to',TRAINING_FILE
 print "successfully updated"
 #shutil.rmtree(foldername)
#------------------------------------------------------------------------------------------
def Authenticate():
 """Capture ~30 face samples from the Pi camera and try to recognize them.

 Returns (1, label) when the dominant predicted label is enrolled and its
 mean confidence is below 50; otherwise (0, -1).
 """
 #load lookup table_ ky
 tableName=LOOKUP_FILE
 table=[]
 samples=[]
 load_table(tableName,table,samples)
 # Create window
 cv2.namedWindow("Preview")
 #cv2.namedWindow("Compared")
 # Load training data into model
 print 'Loading training data...'
 model = cv2.createLBPHFaceRecognizer()
 model.load(TRAINING_FILE)
 print 'Training data loaded!'
 confidences=[]
 labels=[]
 camera=PiCamera()
 camera.resolution=(320,240)
 camera.framerate=32
 rawCapture=PiRGBArray(camera,size=(320,240))
 time.sleep(3)
 # Collect predictions for 30 detected faces before deciding.
 count=30
 recognition=0
 print 'Looking for face...'
 camera.capture(rawCapture,format="bgr",use_video_port=True)
 while rawCapture is not None:
  image=rawCapture.array
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
  result=face.detect_single(gray)
  cv2.imshow("Preview",image)
  key=cv2.waitKey(1)
  if result is None:
   print "Please face to the camera "
  else:
   x, y, w, h = result
   # Crop and resize image to face
   crop = face.resize(face.crop(gray, x, y, w, h))
   label, confidence = model.predict(crop)
   confidences.append(confidence)
   labels.append(label)
   cv2.waitKey(1)
   count -= 1
   if count<=0:
    break
  rawCapture.truncate(0)
  camera.capture(rawCapture,format="bgr",use_video_port=True)
 print "finish capturing faces"
 camera.close()
 cv2.destroyWindow("Preview")
 # Collect the distinct labels seen into 'temp'; i = number of distinct.
 temp=[]
 i=0
 length=len(labels)
 while length>0:
  if i==0:
   temp.append(labels[length-1])
   i += 1
   length -= 1
  else:
   tempi=0
   while tempi<i:
    if labels[length-1]!=temp[tempi]:
     tempi += 1
    else:
     length -=1
     break
   if tempi == i:
    temp.append(labels[length-1])
    i += 1
    length -= 1
 print "------LABELS:{}".format(labels)
 print "------DIFFERENT LABELS:{}".format(temp)
 print "------NUMBER OF DIFFERENT LABELS:{}".format(i)
 tempi=0
 numoflabel=0
 # Too many different labels means the readings are unreliable: reject.
 if i > 5:
  print "could not enter"
  #print labels
  return 0,-1
 else:
  # Majority vote: find the label predicted most often ('element').
  element=temp[tempi]
  while tempi < i:
   tempj=0
   count=0
   while tempj<len(labels):
    if labels[tempj]==temp[tempi]:
     count += 1
    tempj += 1
   if count > numoflabel :
    numoflabel=count
    element=temp[tempi]
   tempi += 1
  print "element is {}, numoflabel is {}".format(element, numoflabel)
  tempi = 0
  con=0
  while tempi < len(labels):
   if labels[tempi]==element:
    con=con+confidences[tempi]
   tempi += 1
  ave=con/numoflabel
  print "mean of confidences is {}".format(ave)
  print confidences
  # print recognition
  # Only labels listed in ENROLLMENT_FILE are allowed in.
  f=open(ENROLLMENT_FILE,'r')
  s=f.readline()
  flag=0
  while s!="":
   index=int(s)
   #print index
   if index==element:
    flag=1
    print "flag TRUE"
    break
   s=f.readline()
  # Accept only when mean confidence is below 50 (LBPH distance: lower is closer).
  if ave < 50 and flag==1:
   print "authenticated"
   return 1,element
  else:
   print "could not enter"
   return 0,-1
#------------------------------------------------------------------------------------TESTING MAIN
if __name__ == '__main__':
 # Manual test driver: uncomment the training / enrollment steps as needed,
 # then run the authentication round.
 #------------run this first to train the system
 #create_csv()
 #trainLBPH()
 #------------enroll new user
 ##name="kaylee YE"
 #LBPHupdate(name)
 #------------authenticate
 successful,label = Authenticate()
 print successful
 print label
| bsd-3-clause |
trailhunger/DEM-Tools | Hillup/data/__init__.py | 1 | 9732 | """ Starting point for DEM retrieval utilities.
"""
from math import pi, sin, cos
from os import unlink, close
from itertools import product
from tempfile import mkstemp
import NED10m, NED100m, NED1km, SRTM1, SRTM3
from ModestMaps.Core import Coordinate
from TileStache.Geography import SphericalMercator
from TileStache.Core import Layer, Metatile
from TileStache.Config import Configuration
from TileStache.Caches import Disk
from osgeo import gdal, osr
from PIL import Image
import numpy
from .. import save_slope_aspect
# used to prevent clobbering in /vsimem/, see:
# http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html
vsimem_counter = 1

#
# Set up some useful projections.
#
osr.UseExceptions() # <-- otherwise errors will be silent and useless.

# Shared web-mercator projection objects: the TileStache projection supplies
# the proj4 string, and the OSR spatial reference is derived from it so both
# agree exactly.
webmerc_proj = SphericalMercator()
webmerc_sref = osr.SpatialReference()
webmerc_sref.ImportFromProj4(webmerc_proj.srs)
class SeedingLayer (Layer):
    """ Tilestache-compatible seeding layer for preparing tiled data.

        Intended for use in hillup-seed.py script for preparing a tile directory.
    """
    def __init__(self, demdir, tiledir, tmpdir, source):
        """ Wire a Disk cache and a slope/aspect Provider into a TileStache Layer.

            demdir -- directory holding source DEM data
            tiledir -- directory where rendered tiles are cached
            tmpdir -- scratch directory for intermediate GeoTIFFs
            source -- DEM source selector, passed straight through to Provider
        """
        cache = Disk(tiledir, dirs='safe')
        config = Configuration(cache, '.')

        Layer.__init__(self, config, SphericalMercator(), Metatile())

        self.provider = Provider(self, demdir, tmpdir, source)

    def name(self):
        # Layer name used by the cache when building tile paths; '.' presumably
        # keeps cached tiles rooted directly under tiledir -- confirm against
        # the TileStache Disk cache layout.
        return '.'
class Provider:
    """ TileStache provider for generating tiles of DEM slope and aspect data.

        Source parameter can be "srtm-ned" (default) or "ned-only".

        See http://tilestache.org/doc/#custom-providers for information
        on how the Provider object interacts with TileStache.
    """
    def __init__(self, layer, demdir, tmpdir=None, source='srtm-ned'):
        """
            layer -- owning TileStache layer (unused here, kept for the API)
            demdir -- directory holding source DEM data
            tmpdir -- scratch directory for temporary GeoTIFFs (None = system default)
            source -- "srtm-ned" or "ned-only", selects the provider chooser
        """
        self.tmpdir = tmpdir
        self.demdir = demdir
        self.source = source

    def getTypeByExtension(self, ext):
        """ Return (mime-type, format) for 'tiff'; any other extension is an error. """
        if ext.lower() != 'tiff':
            raise Exception()

        return 'image/tiff', 'TIFF'

    def renderArea(self, width, height, srs, xmin, ymin, xmax, ymax, zoom):
        """ Return an instance of SlopeAndAspect for requested area.
        """
        assert srs == webmerc_proj.srs # <-- good enough for now

        if self.source == 'srtm-ned':
            providers = choose_providers_srtm(zoom)
        elif self.source == 'ned-only':
            providers = choose_providers_ned(zoom)
        else:
            # bug fix: this read a bare "source", which is a NameError here --
            # the parameter lives on self.
            raise Exception('Unknown source "%s"' % self.source)

        #
        # Prepare information for datasets of the desired extent and projection.
        #
        xres = (xmax - xmin) / width
        yres = (ymin - ymax) / height

        area_wkt = webmerc_sref.ExportToWkt()
        # One-pixel buffer on each side so the 3x3 slope window has neighbors.
        buffered_xform = xmin - xres, xres, 0, ymax - yres, 0, yres

        #
        # Reproject and merge DEM datasources into destination datasets.
        #
        driver = gdal.GetDriverByName('GTiff')
        # NOTE(review): gdal ReadAsArray returns (rows, cols); this zeros array
        # is (width+2, height+2), which only lines up for square tiles -- confirm.
        elevation = numpy.zeros((width+2, height+2), numpy.float32)

        for (module, proportion) in providers:
            cs2cs = osr.CoordinateTransformation(webmerc_sref, module.sref)
            minlon, minlat, z = cs2cs.TransformPoint(xmin, ymin)
            maxlon, maxlat, z = cs2cs.TransformPoint(xmax, ymax)

            # Create the temp file *before* entering the try block so the
            # finally clause can never hit an unbound "filename".
            handle, filename = mkstemp(dir=self.tmpdir, prefix='render-area-provider-', suffix='.tif')
            close(handle)
            try:
                ds_area = driver.Create(filename, width+2, height+2, 1, gdal.GDT_Float32)
                ds_area.SetGeoTransform(buffered_xform)
                ds_area.SetProjection(area_wkt)

                ds_args = minlon, minlat, maxlon, maxlat, self.demdir

                for ds_dem in module.datasources(*ds_args):
                    # estimate the raster density across source DEM and output
                    dem_samples = (maxlon - minlon) / ds_dem.GetGeoTransform()[1]
                    area_pixels = (xmax - xmin) / ds_area.GetGeoTransform()[1]

                    if dem_samples > area_pixels:
                        # cubic looks better squeezing down
                        resample = gdal.GRA_Cubic
                    else:
                        # cubic spline looks better stretching out
                        resample = gdal.GRA_CubicSpline

                    gdal.ReprojectImage(ds_dem, ds_area, ds_dem.GetProjection(), ds_area.GetProjection(), resample)
                    ds_dem.FlushCache()

                if proportion == 1:
                    elevation = ds_area.ReadAsArray()
                else:
                    # blend sources by their zoom-dependent proportions
                    elevation += ds_area.ReadAsArray() * proportion

                ds_area.FlushCache()
            finally:
                unlink(filename)

        #
        # Calculate and save slope and aspect.
        #
        slope, aspect = calculate_slope_aspect(elevation, xres, yres)

        tile_xform = xmin, xres, 0, ymax, 0, yres

        return SlopeAndAspect(self.tmpdir, slope, aspect, area_wkt, tile_xform)
class SlopeAndAspect:
    """ TileStache response object with PIL-like save() and crop() methods.

        Knows only how to persist two-band 8-bit GeoTIFFs; TileStache drives
        this object as described at http://tilestache.org/doc/#custom-providers
    """
    def __init__(self, tmpdir, slope, aspect, wkt, xform):
        """ Hold slope and aspect arrays plus minimal georeferencing info.

            tmpdir -- scratch directory handed to save_slope_aspect()
            slope, aspect -- equally-shaped arrays of per-pixel values
            wkt -- projection as well-known text
            xform -- six-element GDAL geotransform tuple
        """
        self.tmpdir = tmpdir
        self.slope = slope
        self.aspect = aspect
        self.wkt = wkt
        self.xform = xform
        self.w, self.h = slope.shape

    def save(self, output, format):
        """ Write a two-band GeoTIFF to the file-like *output*; TIFF only. """
        if format != 'TIFF':
            raise Exception('File format other than TIFF for slope and aspect: "%s"' % format)

        save_slope_aspect(self.slope, self.aspect, self.wkt, self.xform, output, self.tmpdir)

    def crop(self, box):
        """ Would return the (left, upper, right, lower) sub-region of the image.

            Deliberately unimplemented -- TileStache never crops this layer.
        """
        raise NotImplementedError()
def choose_providers_srtm(zoom):
    """ Return a list of (module, proportion) data sources for a zoom level.

        Each data source is a module such as SRTM1 or SRTM3, and the
        proportions always sum to one. The list has one or two items.
    """
    if zoom <= SRTM3.ideal_zoom:
        return [(SRTM3, 1)]
    if zoom >= NED10m.ideal_zoom:
        return [(NED10m, 1)]

    # Every intermediate zoom blends SRTM3 into NED10m. SRTM1 was abandoned
    # as a blend partner because it "looks terrible" (original author's note).
    bottom, top = SRTM3, NED10m

    span = float(top.ideal_zoom) - float(bottom.ideal_zoom)
    bottom_share = 1. - (zoom - float(bottom.ideal_zoom)) / span

    return [(bottom, bottom_share), (top, 1 - bottom_share)]
def choose_providers_ned(zoom):
    """ Return a list of (module, proportion) data sources for a zoom level.

        Each data source is a module such as NED10m or NED1km, and the
        proportions always sum to one. The list has one or two items.
    """
    if zoom <= NED1km.ideal_zoom:
        return [(NED1km, 1)]
    if zoom == NED100m.ideal_zoom:
        return [(NED100m, 1)]
    if zoom >= NED10m.ideal_zoom:
        return [(NED10m, 1)]

    # Blend the two NED resolutions that bracket this zoom level.
    if zoom < NED100m.ideal_zoom:
        bottom, top = NED1km, NED100m
    else:
        bottom, top = NED100m, NED10m

    span = float(top.ideal_zoom) - float(bottom.ideal_zoom)
    bottom_share = 1. - (zoom - float(bottom.ideal_zoom)) / span

    return [(bottom, bottom_share), (top, 1 - bottom_share)]
def calculate_slope_aspect(elevation, xres, yres, z=1.0):
    """ Return a pair of arrays 2 pixels smaller than the input elevation array.

        Slope is returned in radians, from 0 for sheer face to pi/2 for
        flat ground. Aspect is returned in radians, counterclockwise from -pi
        at north around to pi.

        Logic here is borrowed from hillshade.cpp:
          http://www.perrygeo.net/wordpress/?p=7

        NOTE(review): the two shape axes are sliced with swapped extents; this
        is only consistent for square elevation arrays -- confirm callers.
    """
    width, height = elevation.shape[0] - 2, elevation.shape[1] - 2

    # Nine overlapping views of the interior, one per 3x3 neighbor offset,
    # indexed as window[3*row + col].
    window = [z * elevation[row:(row + height), col:(col + width)]
              for row, col in product(range(3), range(3))]

    # Horner-style Sobel-like gradients, kept in the original summation order
    # so float rounding matches exactly.
    dzdx = ((window[0] + window[3] + window[3] + window[6])
            - (window[2] + window[5] + window[5] + window[8])) / (8.0 * xres)

    dzdy = ((window[6] + window[7] + window[7] + window[8])
            - (window[0] + window[1] + window[1] + window[2])) / (8.0 * yres)

    # in radians, from 0 to pi/2
    slope = pi / 2 - numpy.arctan(numpy.sqrt(dzdx * dzdx + dzdy * dzdy))

    # in radians counterclockwise, from -pi at north back to pi
    aspect = numpy.arctan2(dzdx, dzdy)

    return slope, aspect
| bsd-3-clause |
jiaxiaolei/TorCMS | torlite/model/mpost.py | 1 | 7434 | # -*- coding:utf-8 -*-
'''
Author: Bu Kun
E-mail: bukun@osgeo.cn
CopyRight: http://www.yunsuan.org
'''
import time
import tornado.escape
import config
import peewee
import datetime
from torlite.model.core_tab import CabPost2Catalog
from torlite.core import tools
from torlite.model.core_tab import CabPost
class MPost():
    """CRUD and query helpers for the CabPost table (peewee ORM).

    ``post_data`` arguments are Tornado-style request dicts: each value is a
    list of strings, hence the ubiquitous ``[0]`` indexing.
    """

    def __init__(self):
        # Best-effort table creation; peewee raises when the table already
        # exists, which is the normal case after the first run.
        try:
            CabPost.create_table()
        except Exception:
            pass

    def _build_cnt_html(self, post_data):
        """Render cnt_md to HTML: src_type '1' means reStructuredText,
        anything else Markdown. (Shared by update() and insert_data().)"""
        if 'src_type' in post_data and post_data['src_type'][0] == '1':
            return tools.rst2html(post_data['cnt_md'][0])
        return tools.markdown2html(post_data['cnt_md'][0])

    def update(self, uid, post_data, update_time=False):
        """Update the post identified by *uid* from *post_data*; also refresh
        the date/time_update columns when *update_time* is true."""
        id_spec = post_data['id_spec'][0] if 'id_spec' in post_data else 0

        values = dict(
            title=post_data['title'][0],
            cnt_html=self._build_cnt_html(post_data),
            # NOTE(review): user_name is read without the [0] index used
            # everywhere else -- confirm against the callers before changing.
            user_name=post_data['user_name'],
            cnt_md=tornado.escape.xhtml_escape(post_data['cnt_md'][0]),
            id_spec=id_spec,
            logo=post_data['logo'][0],
            keywords=post_data['keywords'][0],
            src_type=post_data['src_type'][0] if ('src_type' in post_data) else 0,
        )
        if update_time:
            values['date'] = datetime.datetime.now()
            values['time_update'] = time.time()

        entry = CabPost.update(**values).where(CabPost.uid == uid)
        entry.execute()

    def insert_data(self, id_post, post_data):
        """Create a new post with uid *id_post*; return the uid, or False
        when that uid is already taken."""
        if self.get_by_id(id_post) is not None:
            return False

        id_spec = post_data['id_spec'][0] if 'id_spec' in post_data else 0

        CabPost.create(
            title=post_data['title'][0],
            date=datetime.datetime.now(),
            cnt_html=self._build_cnt_html(post_data),
            uid=id_post,
            time_create=time.time(),
            user_name=post_data['user_name'],
            cnt_md=tornado.escape.xhtml_escape(post_data['cnt_md'][0]),
            time_update=time.time(),
            view_count=1,
            id_spec=id_spec,
            logo=post_data['logo'][0],
            keywords=post_data['keywords'][0],
            src_type=post_data['src_type'][0] if ('src_type' in post_data) else 0
        )
        return id_post

    def query_old(self):
        """Legacy query kept for compatibility (string-style order_by)."""
        return CabPost.select().order_by('time_update').limit(10)

    def query_random(self, num=6):
        """Return *num* random posts; the SQL random function differs per backend."""
        if config.dbtype in (1, 3):
            return CabPost.select().order_by(peewee.fn.Random()).limit(num)
        elif config.dbtype == 2:
            return CabPost.select().order_by(peewee.fn.Rand()).limit(num)

    def query_cat_random(self, cat_id, num=6):
        """Random posts within category *cat_id*; all categories when ''. """
        if cat_id == '':
            return self.query_random(num)
        if config.dbtype in (1, 3):
            return CabPost.select().join(CabPost2Catalog).where(CabPost2Catalog.catalog == cat_id).order_by(
                peewee.fn.Random()).limit(num)
        elif config.dbtype == 2:
            return CabPost.select().join(CabPost2Catalog).where(CabPost2Catalog.catalog == cat_id).order_by(
                peewee.fn.Rand()).limit(num)

    def get_by_id(self, in_uid):
        """Return the post with uid *in_uid*, or None when absent."""
        recs = CabPost.select().where(CabPost.uid == in_uid)
        if recs.count() == 0:
            return None
        return recs.get()

    def get_num_by_cat(self, cat_str):
        """Count posts whose id_cats CSV column contains *cat_str*."""
        return CabPost.select().where(CabPost.id_cats.contains(',{0},'.format(cat_str))).count()

    def query_all(self):
        return CabPost.select()

    def query_keywords_empty(self):
        return CabPost.select().where(CabPost.keywords == '')

    def query_recent(self, num=8):
        return CabPost.select().order_by(CabPost.time_update.desc()).limit(num)

    def query_dated(self, num=8):
        return CabPost.select().order_by(CabPost.time_update.asc()).limit(num)

    def query_cat_recent(self, cat_id, num=8):
        return CabPost.select().join(CabPost2Catalog).where(CabPost2Catalog.catalog == cat_id).order_by(
            CabPost.time_update.desc()).limit(num)

    def query_most(self, num=8):
        return CabPost.select().order_by(CabPost.view_count.desc()).limit(num)

    def query_recent_most(self, num=8, recent=30):
        """Most-viewed posts updated within the last *recent* days."""
        time_that = int(time.time()) - recent * 24 * 3600
        return CabPost.select().where(CabPost.time_update > time_that).order_by(CabPost.view_count.desc()).limit(num)

    def query_cat_by_pager(self, cat_str, cureent):
        """Page *cureent* (sic, kept for callers) of posts in category *cat_str*."""
        return CabPost.select().where(CabPost.id_cats.contains(str(cat_str))).order_by(
            CabPost.time_update.desc()).paginate(cureent, config.page_num)

    def update_view_count(self, citiao):
        """Bump view_count for the post titled *citiao*."""
        entry = CabPost.update(view_count=CabPost.view_count + 1).where(CabPost.title == citiao)
        entry.execute()

    def update_view_count_by_uid(self, uid):
        """Bump view_count for the post with uid *uid*."""
        entry = CabPost.update(view_count=CabPost.view_count + 1).where(CabPost.uid == uid)
        entry.execute()

    def update_keywords(self, uid, inkeywords):
        entry = CabPost.update(keywords=inkeywords).where(CabPost.uid == uid)
        entry.execute()

    def get_by_wiki(self, citiao):
        """Return the post titled *citiao* (bumping its view counter), or None."""
        tt = CabPost.select().where(CabPost.title == citiao).count()
        if tt == 0:
            return None
        self.update_view_count(citiao)
        return CabPost.get(CabPost.title == citiao)

    def get_next_record(self, in_uid):
        """Post immediately older (by time_update) than *in_uid*, or None."""
        current_rec = self.get_by_id(in_uid)
        query = CabPost.select().where(CabPost.time_update < current_rec.time_update).order_by(
            CabPost.time_update.desc())
        if query.count() == 0:
            return None
        return query.get()

    def get_previous_record(self, in_uid):
        """Post immediately newer (by time_update) than *in_uid*, or None."""
        current_rec = self.get_by_id(in_uid)
        query = CabPost.select().where(CabPost.time_update > current_rec.time_update).order_by(CabPost.time_update)
        if query.count() == 0:
            return None
        return query.get()

    def query_by_spec(self, spec_id):
        return CabPost.select().where(CabPost.id_spec == spec_id).order_by(CabPost.time_update.desc())

    def get_by_keyword(self, par2):
        """Title substring search, newest first, capped at 20 results."""
        return CabPost.select().where(CabPost.title.contains(par2)).order_by(CabPost.time_update.desc()).limit(20)

    def get_by_uid(self, sig):
        """Return the post with uid *sig*, or False when the lookup fails."""
        try:
            return CabPost.get(uid=sig)
        except Exception:
            # narrowed from a bare except; peewee raises CabPost.DoesNotExist here
            return False
espadrine/opera | chromium/src/tools/telemetry/telemetry/core/web_contents.py | 32 | 2454 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEFAULT_WEB_CONTENTS_TIMEOUT = 60
# TODO(achuith, dtu, nduca): Add unit tests specifically for WebContents,
# independent of Tab.
class WebContents(object):
  """Represents web contents in the browser.

  Thin delegating wrapper: every method forwards to the injected
  inspector backend, which owns the DevTools connection.
  """
  def __init__(self, inspector_backend):
    self._inspector_backend = inspector_backend

  def __del__(self):
    self.Disconnect()

  def Disconnect(self):
    """Drop the inspector connection; safe to call more than once via __del__."""
    self._inspector_backend.Disconnect()

  def Close(self):
    """Closes this page.

    Not all browsers or browser versions support this method.
    Be sure to check browser.supports_tab_control."""
    self._inspector_backend.Close()

  def WaitForDocumentReadyStateToBeComplete(self,
      timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
    self._inspector_backend.WaitForDocumentReadyStateToBeComplete(timeout)

  def WaitForDocumentReadyStateToBeInteractiveOrBetter(self,
      timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
    self._inspector_backend.WaitForDocumentReadyStateToBeInteractiveOrBetter(
        timeout)

  def ExecuteJavaScript(self, expr, timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
    """Executes expr in JavaScript. Does not return the result.

    If the expression failed to evaluate, EvaluateException will be raised.
    """
    self._inspector_backend.ExecuteJavaScript(expr, timeout)

  def EvaluateJavaScript(self, expr, timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
    """Evaluates expr in JavaScript and returns the JSONized result.

    Consider using ExecuteJavaScript for cases where the result of the
    expression is not needed.

    If evaluation throws in JavaScript, a Python EvaluateException will
    be raised.

    If the result of the evaluation cannot be JSONized, then an
    EvaluationException will be raised.
    """
    return self._inspector_backend.EvaluateJavaScript(expr, timeout)

  @property
  def message_output_stream(self):
    return self._inspector_backend.message_output_stream

  @message_output_stream.setter
  def message_output_stream(self, stream):
    self._inspector_backend.message_output_stream = stream

  @property
  def timeline_model(self):
    return self._inspector_backend.timeline_model

  def StartTimelineRecording(self):
    self._inspector_backend.StartTimelineRecording()

  def StopTimelineRecording(self):
    self._inspector_backend.StopTimelineRecording()
| bsd-3-clause |
GheRivero/ansible | docs/bin/find-plugin-refs.py | 106 | 3338 | #!/usr/bin/env python
# To run this script, first make webdocs in the toplevel of the checkout. This will generate all
# rst files from their sources. Then run this script ./docs/bin/find-plugin-refs.py
#
# No output means that there are no longer any bare module and plugin names referenced via :ref:
#
# For my listing of what needs to be changed after running this script, see the comment at the end
# of the file
import glob
import os
import re
from ansible.module_utils._text import to_text
TOPDIR = os.path.join(os.path.dirname(__file__), '..', 'docsite', 'rst')
def plugin_names(topdir):
    """Return the set of module and plugin names documented under *topdir*.

    Module docs live at modules/NAME_module.rst; plugin docs live one level
    deeper at plugins/TYPE/NAME.rst.
    """
    found = set()

    # Modules are in a separate directory
    module_pattern = os.path.join(topdir, 'modules', '*_module.rst')
    for path in glob.glob(module_pattern):
        base = os.path.basename(path)
        # str.partition at the first occurrence mirrors slicing by .index()
        found.add(base.partition('_module.rst')[0])

    plugin_pattern = os.path.join(topdir, 'plugins', '*', '*.rst')
    for path in glob.glob(plugin_pattern):
        base = os.path.basename(path)
        found.add(base.partition('.rst')[0])

    return found
def process_refs(topdir, plugin_names):
    """Walk every doc file under *topdir* and print each :ref:`...` whose
    target is a bare plugin/module name from *plugin_names* (i.e. a ref not
    yet ported to the prefixed label scheme)."""
    REF_RE = re.compile(':ref:`([^`]*)`')
    # Matches a trailing "<label>" inside a ref, e.g. :ref:`text <label>`
    LABEL_RE = re.compile('<([^>]*)>$')

    # Walk the whole docs tree looking for :ref:.  Anywhere those are found, search for `([^`]*)`
    for dirpath, dirnames, filenames in os.walk(topdir):
        for filename in filenames:
            with open(os.path.join(dirpath, filename), 'rb') as f:
                data = f.read()
            # to_text: ansible helper decoding bytes to unicode text
            data = to_text(data)
            for ref_match in re.finditer(REF_RE, data):
                label = ref_match.group(1)

                # If the ref label includes "<", then search for the label inside of the "<>"
                label_match = re.search(LABEL_RE, label)
                if label_match:
                    label = label_match.group(1)

                # If the ref label is listed in plugins, then print that the file contains an unported ref
                if label in plugin_names:
                    print(':ref:`{0}` matching plugin {1} was found in {2}'.format(ref_match.group(1), label, os.path.join(dirpath, filename)))
if __name__ == '__main__':
    # Collect all documented plugin/module names, then report every bare
    # :ref: to one of them across the docs tree.
    plugins = plugin_names(TOPDIR)
    process_refs(TOPDIR, plugins)
# Fixes needed: docs/bin/plugin_formatter.py
# - t = _MODULE.sub(r":ref:`\1 <\1>`", t)
# + t = _MODULE.sub(r":ref:`\1 <module_\1>`", t)
#
# These have @{module}@ in the template and need to have something like module_@{module}@
# If any of these list plugins as well as modules, they will need to have a conditional or extra
# data passed in to handle that in a generic fashion:
#
# docs/templates/list_of_CATEGORY_modules.rst.j2
# docs/templates/list_of_CATEGORY_plugins.rst.j2
# docs/templates/modules_by_support.rst.j2
#
# These are just a simple manual fix:
# :ref:`command` matching plugin command was found in ./../docsite/rst/user_guide/intro_adhoc.rst
# :ref:`shell` matching plugin shell was found in ./../docsite/rst/user_guide/intro_adhoc.rst
# :ref:`config` matching plugin config was found in ./../docsite/rst/installation_guide/intro_configuration.rst
| gpl-3.0 |
beconomist/baowenchen | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/scanner.py | 365 | 3114 | # -*- coding: utf-8 -*-
"""
pygments.scanner
~~~~~~~~~~~~~~~~
This library implements a regex based scanner. Some languages
like Pascal are easy to parse but have some keywords that
depend on the context. Because of this it's impossible to lex
that just by using a regular expression lexer like the
`RegexLexer`.
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
class EndOfText(RuntimeError):
    """
    Raise if end of text is reached and the user
    tried to call a match function.
    """
class Scanner(object):
    """
    Simple scanner

    All method patterns are regular expression strings (not
    compiled expressions!)
    """

    def __init__(self, text, flags=0):
        """
        :param text: The text which should be scanned
        :param flags: default regular expression flags
        """
        self.data = text
        self.data_length = len(text)
        self.start_pos = 0
        self.pos = 0
        self.flags = flags
        self.last = None
        self.match = None
        self._re_cache = {}

    @property
    def eos(self):
        """`True` if the scanner reached the end of text."""
        # bug fix: this was ``eos = property(eos, eos.__doc__)``, which passed
        # the docstring as the property's *setter* (the second positional
        # argument of property() is fset). The decorator form keeps the
        # docstring and leaves the property read-only.
        return self.pos >= self.data_length

    def check(self, pattern):
        """
        Apply `pattern` on the current position and return
        the match object. (Doesn't touch pos). Use this for
        lookahead.
        """
        if self.eos:
            raise EndOfText()
        if pattern not in self._re_cache:
            self._re_cache[pattern] = re.compile(pattern, self.flags)
        return self._re_cache[pattern].match(self.data, self.pos)

    def test(self, pattern):
        """Apply a pattern on the current position and check
        if it matches. Doesn't touch pos."""
        return self.check(pattern) is not None

    def scan(self, pattern):
        """
        Scan the text for the given pattern and update pos/match
        and related fields. The return value is a boolean that
        indicates if the pattern matched. The matched value is
        stored on the instance as ``match``, the last value is
        stored as ``last``. ``start_pos`` is the position of the
        pointer before the pattern was matched, ``pos`` is the
        end position.
        """
        if self.eos:
            raise EndOfText()
        if pattern not in self._re_cache:
            self._re_cache[pattern] = re.compile(pattern, self.flags)
        self.last = self.match
        m = self._re_cache[pattern].match(self.data, self.pos)
        if m is None:
            return False
        self.start_pos = m.start()
        self.pos = m.end()
        self.match = m.group()
        return True

    def get_char(self):
        """Scan exactly one char."""
        self.scan('.')

    def __repr__(self):
        return '<%s %d/%d>' % (
            self.__class__.__name__,
            self.pos,
            self.data_length
        )
| mit |
aricchen/openHR | openerp/addons/base/ir/ir_default.py | 73 | 1937 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class ir_default(osv.osv):
    # Stores per-user / per-company default values for object fields
    # (legacy OpenERP osv model).
    _name = 'ir.default'
    _columns = {
        'field_tbl': fields.char('Object',size=64),
        'field_name': fields.char('Object Field',size=64),
        'value': fields.char('Default Value',size=64),
        'uid': fields.many2one('res.users', 'Users'),
        'page': fields.char('View',size=64),
        'ref_table': fields.char('Table Ref.',size=64),
        'ref_id': fields.integer('ID Ref.',size=64),
        'company_id': fields.many2one('res.company','Company')
    }

    def _get_company_id(self, cr, uid, context=None):
        """Return the current user's company id, or False when unset
        (used as the default for company_id)."""
        res = self.pool.get('res.users').read(cr, uid, [uid], ['company_id'], context=context)
        if res and res[0]['company_id']:
            return res[0]['company_id'][0]
        return False

    _defaults = {
        'company_id': _get_company_id,
    }
ir_default()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dragonfi/snowfall | pyglet-1.1.4/tests/image/TEXTURE_GRID.py | 18 | 3171 | #!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
from pyglet.gl import *
from pyglet.image import *
from pyglet.window import *
__noninteractive = True
class TestTextureGrid(unittest.TestCase):
    """Exercises ImageGrid/TextureGrid slicing: each grid cell is filled with
    a distinct grayscale value so check_cell can verify cell contents."""

    def set_grid_image(self, itemwidth, itemheight, rows, cols, rowpad, colpad):
        """Build an 'L'-mode image of rows*cols cells (cell N filled with the
        byte value N+1, padding filled with NUL) and wrap it in an ImageGrid."""
        data = ''
        color = 1
        width = itemwidth * cols + colpad * (cols - 1)
        height = itemheight * rows + rowpad * (rows - 1)
        for row in range(rows):
            rowdata = ''
            for col in range(cols):
                rowdata += ('%c' % color) * itemwidth
                if col < cols - 1:
                    rowdata += '\0' * colpad
                color += 1
            # each pixel row of a cell row repeats the same byte pattern
            data += rowdata * itemheight
            if row < rows - 1:
                data += (width * '\0') * rowpad
        assert len(data) == width * height

        self.image = ImageData(width, height, 'L', data)
        self.grid = ImageGrid(self.image, rows, cols,
            itemwidth, itemheight, rowpad, colpad).texture_sequence

    def check_cell(self, cellimage, cellindex):
        """Assert cell dimensions match the grid and every pixel carries the
        cell's fill byte (cellindex + 1)."""
        self.assertTrue(cellimage.width == self.grid.item_width)
        self.assertTrue(cellimage.height == self.grid.item_height)

        color = '%c' % (cellindex + 1)
        cellimage = cellimage.image_data
        data = cellimage.get_data('L', cellimage.width)
        self.assertTrue(data == color * len(data))

    def setUp(self):
        # GL context is required for texture operations; keep window hidden.
        self.w = Window(visible=False)

    def testSquare(self):
        # Test a 3x3 grid with no padding and 4x4 images
        rows = cols = 3
        self.set_grid_image(4, 4, rows, cols, 0, 0)
        for i in range(rows * cols):
            self.check_cell(self.grid[i], i)

    def testRect(self):
        # Test a 2x5 grid with no padding and 3x8 images
        rows, cols = 2, 5
        self.set_grid_image(3, 8, rows, cols, 0, 0)
        for i in range(rows * cols):
            self.check_cell(self.grid[i], i)

    def testPad(self):
        # Test a 5x3 grid with rowpad=3 and colpad=7 and 10x9 images
        rows, cols = 5, 3
        self.set_grid_image(10, 9, rows, cols, 3, 7)
        for i in range(rows * cols):
            self.check_cell(self.grid[i], i)

    def testTuple(self):
        # Test tuple access
        rows, cols = 3, 4
        self.set_grid_image(5, 5, rows, cols, 0, 0)
        for row in range(rows):
            for col in range(cols):
                self.check_cell(self.grid[(row, col)], row * cols + col)

    def testRange(self):
        # Test range access
        rows, cols = 4, 3
        self.set_grid_image(10, 1, rows, cols, 0, 0)
        images = self.grid[4:8]
        for i, image in enumerate(images):
            self.check_cell(image, i + 4)

    def testTupleRange(self):
        # Test range over tuples
        rows, cols = 10, 10
        self.set_grid_image(4, 4, rows, cols, 0, 0)
        images = self.grid[(3,2):(6,5)]
        i = 0
        for row in range(3,6):
            for col in range(2,5):
                self.check_cell(images[i], row * cols + col)
                i += 1
if __name__ == '__main__':
    # Run the texture-grid test suite directly.
    unittest.main()
| mit |
translation-cards/pipe2json | pipe2json.py | 1 | 3003 | #!/usr/bin/python
import sys
import codecs
import json
# Column separator and meta-row marker for the pipe "csv" input format.
DELIN = '|'
META_MARK = 'META:'
SOURCE_LANGUAGE_KEY = 'source_language'
DEFAULT_SOURCE_LANG = 'en'

# put key names in order they appear in the meta row of the pipe csv
meta_keys = [
    'deck_label',
    'publisher',
    'id',
    'timestamp',
    SOURCE_LANGUAGE_KEY,
    'locked'
]

# put key names in order they appear in the card rows of the pipe csv
card_keys = [
    'card_label',
    'dest_audio',
    'dest_language',
    'dest_txt'
]

# Human-readable language name (lowercased) -> ISO 639-1 code.
iso_map = {
    'arabic': 'ar',
    'english': 'en',
    'farsi': 'fa',
    'pashto': 'ps',
    'urdu': 'ur'
}

# Mutable output state: langs is the list embedded in root; lang_map indexes
# langs by iso code for fast lookup while parsing.
langs = []
lang_map = {}
root = {"languages": langs}


def iso_for_lang(lang):
    """Return the ISO 639-1 code for a language name, or None if unknown."""
    # dict.get already returns None for missing keys, replacing the
    # hand-rolled membership test + conditional expression.
    return iso_map.get(lang.lower())
def parse(cols, dest, key_dict):
    """Clean each column value and store it in *dest* under the key that
    *key_dict* maps to that column position.

    Cleaning strips surrounding blanks, then surrounding single/double
    quotes, then any blanks that were inside the quotes.
    """
    for position, raw in enumerate(cols):
        dest[key_dict[position]] = raw.strip().strip('"\'').strip()
def parse_card(cols):
    """Parse one card row and file the card under its destination language.

    Cards with an unknown dest_language are reported to stderr and dropped;
    otherwise the card joins (or creates) the language bucket in *langs*.
    """
    card = {}
    parse(cols, card, card_keys)

    iso_lang = iso_for_lang(card['dest_language'])
    if iso_lang is None:
        # leave it alone, language not in map
        sys.stderr.write('Could not find language: %s. Card skipped' % card['dest_language'])
        return

    del card['dest_language']  # folded into the language bucket instead

    if iso_lang in lang_map:
        bucket = langs[lang_map[iso_lang]]
    else:
        bucket = {'iso_code': iso_lang, "cards": []}
        langs.append(bucket)
        lang_map[iso_lang] = len(langs) - 1
    bucket['cards'].append(card)
def read_pipe():
    """Read the pipe-delimited deck from stdin into the module-level *root*,
    then normalize root's source_language to an ISO code."""
    # read in creating the two dicts
    for line in sys.stdin:
        cols = line.split(DELIN)
        if cols[0].startswith(META_MARK):
            # remove meta marker from first col
            cols[0] = cols[0].split(META_MARK)[1]
            parse(cols, root, meta_keys)
        else:
            parse_card(cols)

    # fix up source lang
    if SOURCE_LANGUAGE_KEY in root and iso_for_lang(root[SOURCE_LANGUAGE_KEY]) is not None:
        root[SOURCE_LANGUAGE_KEY] = iso_for_lang(root[SOURCE_LANGUAGE_KEY])
    else:
        # missing or unmappable source language falls back to English
        root[SOURCE_LANGUAGE_KEY] = DEFAULT_SOURCE_LANG
def write_json():
    """Dump the assembled *root* structure to stdout as indented JSON."""
    # NOTE: the encoding= keyword exists only on Python 2's json.dump; this
    # script is Python 2 (see the codecs-based stream rebinding in __main__).
    json.dump(root, sys.stdout, ensure_ascii=False, encoding="utf_8", indent=2)
if __name__ == "__main__":
    # Optional argv: <infile> [<outfile>]; rebind stdin/stdout through UTF-8
    # codecs so the pipe "csv" round-trips non-ASCII text (Python 2 script).
    if len(sys.argv) > 1:
        try:
            sys.stdin = codecs.open(sys.argv[1], 'r', 'utf_8')
        except IOError:
            # narrowed from a bare except: only file-open failures are expected
            sys.stderr.write("Cannot read file: %s\n" % sys.argv[1])
            sys.exit(-1)
    if len(sys.argv) > 2:
        try:
            sys.stdout = codecs.open(sys.argv[2], 'w', 'utf_8')
        except IOError:
            sys.stderr.write("Cannot write file: %s\n" % sys.argv[2])
            sys.exit(-1)
    if len(sys.argv) > 3:
        sys.stderr.write("Usage: pipe2json <infile> <outfile>\n")
        sys.exit(-1)

    read_pipe()
    write_json()
| apache-2.0 |
steedos/odoo | addons/auth_signup/res_users.py | 116 | 14728 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from datetime import datetime, timedelta
import random
from urlparse import urljoin
import werkzeug
from openerp.addons.base.ir.ir_mail_server import MailDeliveryException
from openerp.osv import osv, fields
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT, ustr
from ast import literal_eval
from openerp.tools.translate import _
class SignupError(Exception):
    # Raised by the signup machinery when a token or partner cannot be
    # resolved or a signup operation cannot be completed.
    pass
def random_token():
    """Return a 20-character alphanumeric signup token.

    Uses random.SystemRandom (os.urandom-backed) so tokens are suitable
    for security purposes; entropy is about 119 bits (~5.95 bits/char * 20).
    """
    chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
    # range() works on both Python 2 and 3; xrange was Python-2-only
    return ''.join(random.SystemRandom().choice(chars) for i in range(20))
def now(**kwargs):
    """Return the current time plus an optional timedelta offset (e.g.
    days=1), formatted as an OpenERP server datetime string."""
    offset = timedelta(**kwargs)
    return (datetime.now() + offset).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
class res_partner(osv.Model):
    """Extend ``res.partner`` with signup-token fields and helpers used by
    the auth_signup flow (user invitations and password resets)."""
    _inherit = 'res.partner'

    def _get_signup_valid(self, cr, uid, ids, name, arg, context=None):
        # Function-field getter: a token is valid when it exists and its
        # expiration (a server-format datetime string) has not passed.
        dt = now()
        res = {}
        for partner in self.browse(cr, uid, ids, context):
            res[partner.id] = bool(partner.signup_token) and \
                (not partner.signup_expiration or dt <= partner.signup_expiration)
        return res

    def _get_signup_url_for_action(self, cr, uid, ids, action=None, view_type=None, menu_id=None, res_id=None, model=None, context=None):
        """ generate a signup url for the given partner ids and action, possibly overriding
            the url state components (menu_id, id, view_type) """
        if context is None:
            context = {}
        res = dict.fromkeys(ids, False)
        base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
        for partner in self.browse(cr, uid, ids, context):
            # when required, make sure the partner has a valid signup token
            if context.get('signup_valid') and not partner.user_ids:
                self.signup_prepare(cr, uid, [partner.id], context=context)

            route = 'login'
            # the parameters to encode for the query
            query = dict(db=cr.dbname)
            signup_type = context.get('signup_force_type_in_url', partner.signup_type or '')
            if signup_type:
                route = 'reset_password' if signup_type == 'reset' else signup_type

            # Prefer the signup token; fall back to the login of an existing
            # linked user; otherwise no URL can be built for this partner.
            if partner.signup_token and signup_type:
                query['token'] = partner.signup_token
            elif partner.user_ids:
                query['login'] = partner.user_ids[0].login
            else:
                continue  # no signup token, no user, thus no signup url!

            # Optional web-client state, carried as a '/web#...' redirect.
            fragment = dict()
            if action:
                fragment['action'] = action
            if view_type:
                fragment['view_type'] = view_type
            if menu_id:
                fragment['menu_id'] = menu_id
            if model:
                fragment['model'] = model
            if res_id:
                fragment['id'] = res_id
            if fragment:
                query['redirect'] = '/web#' + werkzeug.url_encode(fragment)

            res[partner.id] = urljoin(base_url, "/web/%s?%s" % (route, werkzeug.url_encode(query)))

        return res

    def _get_signup_url(self, cr, uid, ids, name, arg, context=None):
        """ proxy for function field towards actual implementation """
        return self._get_signup_url_for_action(cr, uid, ids, context=context)

    _columns = {
        'signup_token': fields.char('Signup Token', copy=False),
        'signup_type': fields.char('Signup Token Type', copy=False),
        'signup_expiration': fields.datetime('Signup Expiration', copy=False),
        'signup_valid': fields.function(_get_signup_valid, type='boolean', string='Signup Token is Valid'),
        'signup_url': fields.function(_get_signup_url, type='char', string='Signup URL'),
    }

    def action_signup_prepare(self, cr, uid, ids, context=None):
        # UI entry point; delegates to signup_prepare with defaults.
        return self.signup_prepare(cr, uid, ids, context=context)

    def signup_cancel(self, cr, uid, ids, context=None):
        # Invalidate any pending signup token on the given partners.
        return self.write(cr, uid, ids, {'signup_token': False, 'signup_type': False, 'signup_expiration': False}, context=context)

    def signup_prepare(self, cr, uid, ids, signup_type="signup", expiration=False, context=None):
        """ generate a new token for the partners with the given validity, if necessary
            :param expiration: the expiration datetime of the token (string, optional)
        """
        for partner in self.browse(cr, uid, ids, context):
            if expiration or not partner.signup_valid:
                # Draw tokens until one is not already in use.
                token = random_token()
                while self._signup_retrieve_partner(cr, uid, token, context=context):
                    token = random_token()
                partner.write({'signup_token': token, 'signup_type': signup_type, 'signup_expiration': expiration})
        return True

    def _signup_retrieve_partner(self, cr, uid, token,
                                 check_validity=False, raise_exception=False, context=None):
        """ find the partner corresponding to a token, and possibly check its validity
            :param token: the token to resolve
            :param check_validity: if True, also check validity
            :param raise_exception: if True, raise exception instead of returning False
            :return: partner (browse record) or False (if raise_exception is False)
        """
        partner_ids = self.search(cr, uid, [('signup_token', '=', token)], context=context)
        if not partner_ids:
            if raise_exception:
                raise SignupError("Signup token '%s' is not valid" % token)
            return False
        partner = self.browse(cr, uid, partner_ids[0], context)
        if check_validity and not partner.signup_valid:
            if raise_exception:
                raise SignupError("Signup token '%s' is no longer valid" % token)
            return False
        return partner

    def signup_retrieve_info(self, cr, uid, token, context=None):
        """ retrieve the user info about the token
            :return: a dictionary with the user information:
                - 'db': the name of the database
                - 'token': the token, if token is valid
                - 'name': the name of the partner, if token is valid
                - 'login': the user login, if the user already exists
                - 'email': the partner email, if the user does not exist
        """
        # NOTE(review): passes context=None instead of the received context —
        # confirm whether this is intentional before changing.
        partner = self._signup_retrieve_partner(cr, uid, token, raise_exception=True, context=None)
        res = {'db': cr.dbname}
        if partner.signup_valid:
            res['token'] = token
            res['name'] = partner.name
        if partner.user_ids:
            res['login'] = partner.user_ids[0].login
        else:
            res['email'] = partner.email or ''
        return res
class res_users(osv.Model):
    """Extend ``res.users`` with signup/reset-password behaviour: a computed
    connection state, token-based signup, and automatic invitation emails."""
    _inherit = 'res.users'

    def _get_state(self, cr, uid, ids, name, arg, context=None):
        # 'active' once the user has logged in at least once, else 'new'.
        res = {}
        for user in self.browse(cr, uid, ids, context):
            res[user.id] = ('active' if user.login_date else 'new')
        return res

    _columns = {
        'state': fields.function(_get_state, string='Status', type='selection',
                                 selection=[('new', 'Never Connected'), ('active', 'Activated')]),
    }

    def signup(self, cr, uid, values, token=None, context=None):
        """ signup a user, to either:
            - create a new user (no token), or
            - create a user for a partner (with token, but no user for partner), or
            - change the password of a user (with token, and existing user).
            :param values: a dictionary with field values that are written on user
            :param token: signup token (optional)
            :return: (dbname, login, password) for the signed up user
        """
        if token:
            # signup with a token: find the corresponding partner id
            res_partner = self.pool.get('res.partner')
            # NOTE(review): context=None is passed instead of the received
            # context — confirm whether this is intentional.
            partner = res_partner._signup_retrieve_partner(
                cr, uid, token, check_validity=True, raise_exception=True, context=None)
            # invalidate signup token
            partner.write({'signup_token': False, 'signup_type': False, 'signup_expiration': False})

            partner_user = partner.user_ids and partner.user_ids[0] or False

            # avoid overwriting existing (presumably correct) values with geolocation data
            if partner.country_id or partner.zip or partner.city:
                values.pop('city', None)
                values.pop('country_id', None)
            if partner.lang:
                values.pop('lang', None)

            if partner_user:
                # user exists, modify it according to values
                values.pop('login', None)
                values.pop('name', None)
                partner_user.write(values)
                return (cr.dbname, partner_user.login, values.get('password'))
            else:
                # user does not exist: sign up invited user
                values.update({
                    'name': partner.name,
                    'partner_id': partner.id,
                    'email': values.get('email') or values.get('login'),
                })
                if partner.company_id:
                    values['company_id'] = partner.company_id.id
                    values['company_ids'] = [(6, 0, [partner.company_id.id])]
                self._signup_create_user(cr, uid, values, context=context)
        else:
            # no token, sign up an external user
            values['email'] = values.get('email') or values.get('login')
            self._signup_create_user(cr, uid, values, context=context)

        return (cr.dbname, values.get('login'), values.get('password'))

    def _signup_create_user(self, cr, uid, values, context=None):
        """ create a new user from the template user """
        ir_config_parameter = self.pool.get('ir.config_parameter')
        template_user_id = literal_eval(ir_config_parameter.get_param(cr, uid, 'auth_signup.template_user_id', 'False'))
        assert template_user_id and self.exists(cr, uid, template_user_id, context=context), 'Signup: invalid template user'

        # check that uninvited users may sign up
        if 'partner_id' not in values:
            if not literal_eval(ir_config_parameter.get_param(cr, uid, 'auth_signup.allow_uninvited', 'False')):
                raise SignupError('Signup is not allowed for uninvited users')

        assert values.get('login'), "Signup: no login given for new user"
        assert values.get('partner_id') or values.get('name'), "Signup: no name or partner given for new user"

        # create a copy of the template user (attached to a specific partner_id if given)
        values['active'] = True
        # no_reset_password avoids create() re-triggering an invitation email.
        context = dict(context or {}, no_reset_password=True)
        try:
            with cr.savepoint():
                return self.copy(cr, uid, template_user_id, values, context=context)
        except Exception, e:
            # copy may failed if asked login is not available.
            raise SignupError(ustr(e))

    def reset_password(self, cr, uid, login, context=None):
        """ retrieve the user corresponding to login (login or email),
            and reset their password
        """
        user_ids = self.search(cr, uid, [('login', '=', login)], context=context)
        if not user_ids:
            # Fall back to matching on the email address.
            user_ids = self.search(cr, uid, [('email', '=', login)], context=context)
        if len(user_ids) != 1:
            raise Exception(_('Reset password: invalid username or email'))
        return self.action_reset_password(cr, uid, user_ids, context=context)

    def action_reset_password(self, cr, uid, ids, context=None):
        """ create signup token for each user, and send their signup url by email """
        # prepare reset password signup
        res_partner = self.pool.get('res.partner')
        partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context)]
        # Tokens of type 'reset' expire one day from now.
        res_partner.signup_prepare(cr, uid, partner_ids, signup_type="reset", expiration=now(days=+1), context=context)

        if not context:
            context = {}

        # send email to users with their signup url
        template = False
        if context.get('create_user'):
            try:
                # get_object() raises ValueError if record does not exist
                template = self.pool.get('ir.model.data').get_object(cr, uid, 'auth_signup', 'set_password_email')
            except ValueError:
                pass
        if not bool(template):
            template = self.pool.get('ir.model.data').get_object(cr, uid, 'auth_signup', 'reset_password_email')
        assert template._name == 'email.template'

        for user in self.browse(cr, uid, ids, context):
            if not user.email:
                raise osv.except_osv(_("Cannot send email: user has no email address."), user.name)
            self.pool.get('email.template').send_mail(cr, uid, template.id, user.id, force_send=True, raise_exception=True, context=context)

    def create(self, cr, uid, values, context=None):
        """Create a user and, unless suppressed via ``no_reset_password`` in
        the context, immediately email them a set-password invitation."""
        if context is None:
            context = {}
        # overridden to automatically invite user to sign up
        user_id = super(res_users, self).create(cr, uid, values, context=context)
        user = self.browse(cr, uid, user_id, context=context)
        if user.email and not context.get('no_reset_password'):
            context = dict(context, create_user=True)
            try:
                self.action_reset_password(cr, uid, [user.id], context=context)
            except MailDeliveryException:
                # Could not deliver the invitation: drop the signup token
                # rather than leaving a dangling one; user creation succeeds.
                self.pool.get('res.partner').signup_cancel(cr, uid, [user.partner_id.id], context=context)
        return user_id
| agpl-3.0 |
ianyh/heroku-buildpack-python-nodejs | vendor/distribute-0.6.36/setuptools/tests/test_packageindex.py | 67 | 5282 | """Package Index Tests
"""
import sys
import unittest
import urllib2
import pkg_resources
import httplib
import distutils.errors
import setuptools.package_index
from server import IndexServer
class TestPackageIndex(unittest.TestCase):
    """Exercise setuptools.package_index.PackageIndex URL handling and
    download-link selection (Python 2 era: urllib2/httplib)."""

    def test_bad_url_bad_port(self):
        # Port 0 is unroutable; opening the URL should fail and the error
        # message should mention the offending URL.
        index = setuptools.package_index.PackageIndex()
        url = 'http://127.0.0.1:0/nonesuch/test_package_index'
        try:
            v = index.open_url(url)
        except Exception, v:
            self.assertTrue(url in str(v))
        else:
            # NOTE(review): this branch runs only when open_url does NOT
            # raise, yet checks the return value against HTTPError — confirm
            # the intent before relying on it.
            self.assertTrue(isinstance(v,urllib2.HTTPError))

    def test_bad_url_typo(self):
        # issue 16
        # easy_install inquant.contentmirror.plone breaks because of a typo
        # in its home URL
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        url = 'url:%20https://svn.plone.org/svn/collective/inquant.contentmirror.plone/trunk'
        try:
            v = index.open_url(url)
        except Exception, v:
            self.assertTrue(url in str(v))
        else:
            self.assertTrue(isinstance(v, urllib2.HTTPError))

    def test_bad_url_bad_status_line(self):
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        # Patch urllib2.urlopen so any open raises BadStatusLine.
        def _urlopen(*args):
            import httplib
            raise httplib.BadStatusLine('line')

        old_urlopen = urllib2.urlopen
        urllib2.urlopen = _urlopen
        url = 'http://example.com'
        try:
            try:
                v = index.open_url(url)
            except Exception, v:
                self.assertTrue('line' in str(v))
            else:
                raise AssertionError('Should have raise here!')
        finally:
            # Always restore the real urlopen.
            urllib2.urlopen = old_urlopen

    def test_bad_url_double_scheme(self):
        """
        A bad URL with a double scheme should raise a DistutilsError.
        """
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        # issue 20
        url = 'http://http://svn.pythonpaste.org/Paste/wphp/trunk'
        try:
            index.open_url(url)
        except distutils.errors.DistutilsError, error:
            # Accepted message varies by platform/resolver.
            msg = unicode(error)
            assert 'nonnumeric port' in msg or 'getaddrinfo failed' in msg or 'Name or service not known' in msg
            return
        raise RuntimeError("Did not raise")

    def test_bad_url_screwy_href(self):
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        # issue #160
        if sys.version_info[0] == 2 and sys.version_info[1] == 7:
            # this should not fail
            url = 'http://example.com'
            page = ('<a href="http://www.famfamfam.com]('
                    'http://www.famfamfam.com/">')
            index.process_index(url, page)

    def test_url_ok(self):
        # file:// URLs are always acceptable regardless of the hosts filter.
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )
        url = 'file:///tmp/test_package_index'
        self.assertTrue(index.url_ok(url, True))

    def test_links_priority(self):
        """
        Download links from the pypi simple index should be used before
        external download links.
        http://bitbucket.org/tarek/distribute/issue/163/md5-validation-error

        Usecase :
        - someone uploads a package on pypi, a md5 is generated
        - someone manually copies this link (with the md5 in the url) onto an
          external page accessible from the package page.
        - someone reuploads the package (with a different md5)
        - while easy_installing, an MD5 error occurs because the external link
          is used
        -> Distribute should use the link from pypi, not the external one.
        """
        if sys.platform.startswith('java'):
            # Skip this test on jython because binding to :0 fails
            return

        # start an index server
        server = IndexServer()
        server.start()
        index_url = server.base_url() + 'test_links_priority/simple/'

        # scan a test index
        pi = setuptools.package_index.PackageIndex(index_url)
        requirement = pkg_resources.Requirement.parse('foobar')
        pi.find_packages(requirement)
        server.stop()

        # the distribution has been found
        self.assertTrue('foobar' in pi)
        # we have only one link, because links are compared without md5
        self.assertTrue(len(pi['foobar'])==1)
        # the link should be from the index
        self.assertTrue('correct_md5' in pi['foobar'][0].location)

    def test_parse_bdist_wininst(self):
        # (base, python version or None, platform) from wininst filenames.
        self.assertEqual(setuptools.package_index.parse_bdist_wininst(
            'reportlab-2.5.win32-py2.4.exe'), ('reportlab-2.5', '2.4', 'win32'))
        self.assertEqual(setuptools.package_index.parse_bdist_wininst(
            'reportlab-2.5.win32.exe'), ('reportlab-2.5', None, 'win32'))
        self.assertEqual(setuptools.package_index.parse_bdist_wininst(
            'reportlab-2.5.win-amd64-py2.7.exe'), ('reportlab-2.5', '2.7', 'win-amd64'))
        self.assertEqual(setuptools.package_index.parse_bdist_wininst(
            'reportlab-2.5.win-amd64.exe'), ('reportlab-2.5', None, 'win-amd64'))
| mit |
opencorato/represent-boundaries | boundaries/tests/__init__.py | 1 | 12052 | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from copy import deepcopy
from django.conf import settings
from django.contrib.gis.gdal import OGRGeometry
from django.contrib.gis.geos import GEOSGeometry, MultiPolygon
from django.test import TestCase
from django.utils.encoding import python_2_unicode_compatible
from django.utils.six import assertRegex, string_types
from django.utils.six.moves.urllib.parse import parse_qsl, unquote_plus, urlparse
from boundaries.models import app_settings, Boundary
# Matches a full JSONP envelope: the 64-character callback name used by the
# tests, an opening paren, any payload, then ');'.
jsonp_re = re.compile(r'\AabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890_\((.+)\);\Z', re.DOTALL)
# Detects pretty-printed JSON (a newline followed by an indented line).
pretty_re = re.compile(r'\n ')

# Backport alias so tests can always call assertCountEqual.
if not hasattr(TestCase, 'assertCountEqual'):  # Python < 3.2
    TestCase.assertCountEqual = TestCase.assertItemsEqual
class FeatureProxy(dict):
    """Minimal stand-in for an OGR feature used in tests: field values live
    in the dict itself, while ``fields`` and ``geom`` mimic the feature API."""

    def __init__(self, fields):
        dict.__init__(self)
        self.update(fields)

    @property
    def fields(self):
        # Field names are simply the dictionary keys.
        return self.keys()

    @property
    def geom(self):
        # Every proxy reports the same fixed multipolygon geometry.
        return OGRGeometry('MULTIPOLYGON (((0 0,0.0001 0.0001,0 5,5 5,0 0)))')
class ViewTestCase(TestCase):
    """Base TestCase with assertion helpers for API responses."""

    non_integers = ('', '1.0', '0b1', '0o1', '0x1')  # '01' is okay

    def assertResponse(self, response, content_type='application/json; charset=utf-8'):
        # 200 with the expected content type; CORS header only when enabled
        # and the payload is JSON.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], content_type)
        if app_settings.ALLOW_ORIGIN and 'application/json' in response['Content-Type']:
            self.assertEqual(response['Access-Control-Allow-Origin'], '*')
        else:
            self.assertNotIn('Access-Control-Allow-Origin', response)

    def assertNotFound(self, response):
        self.assertEqual(response.status_code, 404)
        self.assertIn(response['Content-Type'], ('text/html', 'text/html; charset=utf-8'))  # different versions of Django
        self.assertNotIn('Access-Control-Allow-Origin', response)

    def assertError(self, response):
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response['Content-Type'], 'text/plain')
        self.assertNotIn('Access-Control-Allow-Origin', response)

    def assertForbidden(self, response):
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
        self.assertNotIn('Access-Control-Allow-Origin', response)

    def assertJSONEqual(self, actual, expected):
        # Accept either a JSON string or a response object on each side,
        # then compare order-insensitively via comparable().
        if isinstance(actual, string_types):
            actual = json.loads(actual)
        else:  # It's a response.
            actual = load_response(actual)
        if isinstance(expected, string_types):
            expected = json.loads(expected)
        else:
            expected = deepcopy(expected)
        self.assertCountEqual(comparable(actual), comparable(expected))
@python_2_unicode_compatible
class URL(object):
    """
    URL wrapper whose equality/hash ignore query-parameter order.

    https://stackoverflow.com/questions/5371992/comparing-two-urls-in-python
    """
    def __init__(self, url):
        if not isinstance(url, string_types):
            # Already a URL instance: share its normalized parse result.
            self.parsed = url.parsed
            return
        raw = urlparse(url)
        # Query params become an order-insensitive frozenset; path is unquoted.
        normalized_query = frozenset(parse_qsl(raw.query))
        self.parsed = raw._replace(query=normalized_query, path=unquote_plus(raw.path))

    def __eq__(self, other):
        return self.parsed == other.parsed

    def __hash__(self):
        return hash(self.parsed)

    def __str__(self):
        return self.parsed.geturl()
def comparable(o):
    """
    Normalize a decoded-JSON structure for order-insensitive comparison:
    values under URL-ish keys ('...url', 'next', 'previous') are wrapped in
    URL objects, dicts are rewritten in place, lists are rebuilt recursively.
    """
    if isinstance(o, list):
        return [comparable(item) for item in o]
    if isinstance(o, dict):
        for key, value in o.items():
            if value is None:
                o[key] = None
            elif key.endswith('url') or key in ('next', 'previous'):
                o[key] = URL(value)
            else:
                o[key] = comparable(value)
    return o
def load_response(response):
    """Decode an HTTP response body as UTF-8 and parse it as JSON."""
    body = response.content.decode('utf-8')
    return json.loads(body)
class ViewsTests(object):
    """Mixin of generic view checks; concrete TestCases must provide
    ``self.url`` and the expected ``self.json`` payload."""

    def test_get(self):
        response = self.client.get(self.url)
        self.assertResponse(response)
        self.assertEqual(load_response(response), self.json)

    def test_allow_origin(self):
        # Tuple-swap temporarily disables ALLOW_ORIGIN, then restores it.
        app_settings.ALLOW_ORIGIN, _ = None, app_settings.ALLOW_ORIGIN

        response = self.client.get(self.url)
        self.assertResponse(response)
        self.assertEqual(load_response(response), self.json)

        app_settings.ALLOW_ORIGIN = _

    def test_jsonp(self):
        response = self.client.get(self.url, {'callback': 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890`~!@#$%^&*()-_=+[{]}\\|;:\'",<.>/?'})
        self.assertResponse(response)
        content = response.content.decode('utf-8')
        # Strip the 64-char sanitized callback prefix and trailing ');'.
        self.assertJSONEqual(content[64:-2], self.json)
        assertRegex(self, content, jsonp_re)

    def test_apibrowser(self):
        response = self.client.get(self.url, {'format': 'apibrowser', 'limit': 20})
        self.assertResponse(response, content_type='text/html; charset=utf-8')
class PrettyTests(object):
    """Mixin: pretty-printing (indented JSON) checks, alone and with JSONP."""

    def test_pretty(self):
        response = self.client.get(self.url, {'pretty': 1})
        self.assertResponse(response)
        self.assertEqual(load_response(response), self.json)
        # pretty_re matches the newline+indent of pretty-printed output.
        assertRegex(self, response.content.decode('utf-8'), pretty_re)

    def test_jsonp_and_pretty(self):
        response = self.client.get(self.url, {'callback': 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890`~!@#$%^&*()-_=+[{]}\\|;:\'",<.>/?', 'pretty': 1})
        self.assertResponse(response)
        content = response.content.decode('utf-8')
        # Payload sits between the 64-char callback prefix and trailing ');'.
        self.assertJSONEqual(content[64:-2], self.json)
        assertRegex(self, content, jsonp_re)
        assertRegex(self, response.content.decode('utf-8'), pretty_re)
class PaginationTests(object):
    """Mixin: limit/offset query-parameter validation and clamping."""

    def test_limit_is_set(self):
        response = self.client.get(self.url, {'limit': 10})
        self.assertResponse(response)
        data = deepcopy(self.json)
        data['meta']['limit'] = 10
        self.assertEqual(load_response(response), data)

    def test_offset_is_set(self):
        response = self.client.get(self.url, {'offset': 10})
        self.assertResponse(response)
        data = deepcopy(self.json)
        data['meta']['offset'] = 10
        self.assertEqual(load_response(response), data)

    def test_limit_is_set_to_maximum_if_zero(self):
        # limit=0 means "as many as allowed", clamped to 1000.
        response = self.client.get(self.url, {'limit': 0})
        self.assertResponse(response)
        data = deepcopy(self.json)
        data['meta']['limit'] = 1000
        self.assertEqual(load_response(response), data)

    def test_limit_is_set_to_maximum_if_greater_than_maximum(self):
        response = self.client.get(self.url, {'limit': 2000})
        self.assertResponse(response)
        data = deepcopy(self.json)
        data['meta']['limit'] = 1000
        self.assertEqual(load_response(response), data)

    def test_api_limit_per_page(self):
        # Temporarily set the Django setting, restore it afterwards.
        settings.API_LIMIT_PER_PAGE, _ = 1, getattr(settings, 'API_LIMIT_PER_PAGE', 20)

        response = self.client.get(self.url)
        self.assertResponse(response)
        data = deepcopy(self.json)
        data['meta']['limit'] = 1
        self.assertEqual(load_response(response), data)

        settings.API_LIMIT_PER_PAGE = _

    def test_limit_must_be_an_integer(self):
        for value in self.non_integers:
            response = self.client.get(self.url, {'limit': value})
            self.assertError(response)
            self.assertEqual(response.content, ("Invalid limit '%s' provided. Please provide a positive integer." % value).encode('ascii'))

    def test_offset_must_be_an_integer(self):
        for value in self.non_integers:
            response = self.client.get(self.url, {'offset': value})
            self.assertError(response)
            self.assertEqual(response.content, ("Invalid offset '%s' provided. Please provide a positive integer." % value).encode('ascii'))

    def test_limit_must_be_non_negative(self):
        response = self.client.get(self.url, {'limit': -1})
        self.assertError(response)
        self.assertEqual(response.content, b"Invalid limit '-1' provided. Please provide a positive integer >= 0.")

    def test_offset_must_be_non_negative(self):
        response = self.client.get(self.url, {'offset': -1})
        self.assertError(response)
        self.assertEqual(response.content, b"Invalid offset '-1' provided. Please provide a positive integer >= 0.")
class BoundaryListTests(object):
    """Mixin: boundary list behaviour when geo-result limits are exceeded."""

    def test_omits_meta_if_too_many_items_match(self):
        # Force the geo-list cap to 0 so one boundary already exceeds it.
        app_settings.MAX_GEO_LIST_RESULTS, _ = 0, app_settings.MAX_GEO_LIST_RESULTS

        geom = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,0 0)))')
        Boundary.objects.create(slug='foo', set_id='inc', shape=geom, simple_shape=geom)

        response = self.client.get(self.url)
        self.assertResponse(response)
        self.assertJSONEqual(response, '{"objects": [{"url": "/boundaries/inc/foo/", "boundary_set_name": "", "external_id": "", "name": "", "related": {"boundary_set_url": "/boundary-sets/inc/"}}], "meta": {"next": null, "total_count": 1, "previous": null, "limit": 20, "offset": 0}}')

        app_settings.MAX_GEO_LIST_RESULTS = _
class GeoListTests(object):
    """Mixin: spatial list endpoints must refuse over-large result sets."""

    def test_must_not_match_too_many_items(self):
        # With the cap at 0, any match triggers the 403 path.
        app_settings.MAX_GEO_LIST_RESULTS, _ = 0, app_settings.MAX_GEO_LIST_RESULTS

        response = self.client.get(self.url)
        self.assertForbidden(response)
        self.assertEqual(response.content, b'Spatial-list queries cannot return more than 0 resources; this query would return 1. Please filter your query.')

        app_settings.MAX_GEO_LIST_RESULTS = _
class GeoTests(object):
    """Mixin: alternate output formats (WKT, KML) for geometry endpoints."""

    def test_wkt(self):
        response = self.client.get(self.url, {'format': 'wkt'})
        self.assertResponse(response, content_type='text/plain')
        self.assertEqual(response.content, b'MULTIPOLYGON (((0.0000000000000000 0.0000000000000000, 0.0000000000000000 5.0000000000000000, 5.0000000000000000 5.0000000000000000, 0.0000000000000000 0.0000000000000000)))')

    def test_kml(self):
        response = self.client.get(self.url, {'format': 'kml'})
        self.assertResponse(response, content_type='application/vnd.google-earth.kml+xml')
        self.assertEqual(response.content, b'<?xml version="1.0" encoding="UTF-8"?>\n<kml xmlns="http://www.opengis.net/kml/2.2">\n<Document>\n<Placemark><name></name><MultiGeometry><Polygon><outerBoundaryIs><LinearRing><coordinates>0.0,0.0,0 0.0,5.0,0 5.0,5.0,0 0.0,0.0,0</coordinates></LinearRing></outerBoundaryIs></Polygon></MultiGeometry></Placemark>\n</Document>\n</kml>')
        self.assertEqual(response['Content-Disposition'], 'attachment; filename="shape.kml"')

    def test_invalid(self):
        # Unknown formats surface as NotImplementedError from the view.
        self.assertRaises(NotImplementedError, self.client.get, self.url, {'format': 'invalid'})
# For Django < 1.6
from boundaries.tests.test import *
from boundaries.tests.test_boundary import *
from boundaries.tests.test_boundary_detail import *
from boundaries.tests.test_boundary_geo_detail import *
from boundaries.tests.test_boundary_list import *
from boundaries.tests.test_boundary_list_filter import *
from boundaries.tests.test_boundary_list_geo import *
from boundaries.tests.test_boundary_list_geo_filter import *
from boundaries.tests.test_boundary_list_set import *
from boundaries.tests.test_boundary_list_set_filter import *
from boundaries.tests.test_boundary_list_set_geo import *
from boundaries.tests.test_boundary_list_set_geo_filter import *
from boundaries.tests.test_boundary_set import *
from boundaries.tests.test_boundary_set_detail import *
from boundaries.tests.test_boundary_set_list import *
from boundaries.tests.test_boundary_set_list_filter import *
from boundaries.tests.test_compute_intersections import *
from boundaries.tests.test_definition import *
from boundaries.tests.test_feature import *
from boundaries.tests.test_geometry import *
from boundaries.tests.test_loadshapefiles import *
| mit |
otger/PubSubTest | tests/actions/system.py | 2 | 1441 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from entropyfw import System
from .simplemod import SimpleModule
from entropyfw.common.request import Request
__author__ = 'otger'
class SystemActions(System):
    """Demo System wiring a single SimpleModule named 'adder' and exposing
    request-based helpers against it."""

    def __init__(self):
        System.__init__(self)
        self.add_module(SimpleModule(name='adder'))

    def sum(self, a, b):
        # Issue an 'addition' request to the adder module and return the
        # request object; callers wait on it for the answer.
        r = self.send_request(target='adder', command='addition',
                              arguments={'s1': a, 's2': b})
        return r

    def list_functionality(self):
        # NOTE(review): unlike sum(), this also pushes the request through
        # self.dealer before waiting — confirm whether send_request alone
        # already dispatches.
        r = self.send_request(target='adder',
                              command='listregisteredactions')
        self.dealer.request(r)
        r.wait_answer()
        s = "Functionality of module 'adder': \n"
        for el in r.return_value:
            s += "\t - {0}\n".format(', '.join([str(x) for x in el]))
        print(s)
        return r
if __name__ == "__main__":
    # Manual smoke test: wire a DEBUG console handler onto the framework
    # logger, then exercise int/str addition and functionality listing.
    from entropyfw.logger import log, formatter
    import logging
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    log.addHandler(ch)

    s = SystemActions()
    log.info('Created system')

    r = s.sum(1, 2)
    log.info('Asked int sum')
    # NOTE(review): return_value is logged before wait_answer() here —
    # confirm the answer is guaranteed to be available at this point.
    log.info("Sum(1,2) returned: {0}".format(r.return_value))
    r.wait_answer()

    r = s.sum('a', 'b')
    log.info('Asked str sum')
    r.wait_answer()
    log.info("Sum('a', 'b') returned: {0}".format(r.return_value))

    s.list_functionality()
    s.exit()
rohanp/scikit-learn | sklearn/covariance/tests/test_covariance.py | 34 | 11120 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
# Shared fixture: the diabetes dataset's feature matrix, its first column,
# and its dimensions, used by all covariance tests below.
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
    """EmpiricalCovariance: fit, error norms, Mahalanobis, edge cases."""
    # Tests Covariance module on a simple dataset.
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    emp_cov = empirical_covariance(X)
    assert_array_almost_equal(emp_cov, cov.covariance_, 4)
    # error_norm against itself is zero under every supported norm/option
    assert_almost_equal(cov.error_norm(emp_cov), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='spectral'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='frobenius'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, scaling=False), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, squared=False), 0)
    assert_raises(NotImplementedError,
                  cov.error_norm, emp_cov, norm='foo')
    # Mahalanobis distances computation test
    mahal_dist = cov.mahalanobis(X)
    assert_greater(np.amin(mahal_dist), 0)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)

    # test with one sample
    # Create X with 1 sample and 5 features
    X_1sample = np.arange(5).reshape(1, 5)
    cov = EmpiricalCovariance()
    assert_warns(UserWarning, cov.fit, X_1sample)
    assert_array_almost_equal(cov.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))

    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)

    # test centered case
    cov = EmpiricalCovariance(assume_centered=True)
    cov.fit(X)
    assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
    """ShrunkCovariance: agreement with shrunk_covariance() at several
    shrinkage values, 1-feature input, and store_precision=False."""
    # Tests ShrunkCovariance module on a simple dataset.
    # compare shrunk covariance obtained from data and from MLE estimate
    cov = ShrunkCovariance(shrinkage=0.5)
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
        cov.covariance_, 4)

    # same test with shrinkage not provided
    cov = ShrunkCovariance()
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)

    # same test with shrinkage = 0 (<==> empirical_covariance)
    cov = ShrunkCovariance(shrinkage=0.)
    cov.fit(X)
    assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = ShrunkCovariance(shrinkage=0.3)
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
    cov.fit(X)
    assert(cov.precision_ is None)
def test_ledoit_wolf():
    """LedoitWolf: estimator vs. ledoit_wolf()/ledoit_wolf_shrinkage()
    functions, centered and uncentered, plus edge cases."""
    # Tests LedoitWolf module on a simple dataset.
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_centered)
    shrinkage_ = lw.shrinkage_

    score_ = lw.score(X_centered)
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
                                              assume_centered=True),
                        shrinkage_)
    # shrinkage should not depend on the block size used
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
                                              block_size=6),
                        shrinkage_)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False, assume_centered=True)
    lw.fit(X_centered)
    assert_almost_equal(lw.score(X_centered), score_, 4)
    assert(lw.precision_ is None)

    # Same tests without assuming centered data
    # test shrinkage coeff on a simple data set
    lw = LedoitWolf()
    lw.fit(X)
    assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
    assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
    assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
    assert_almost_equal(lw.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf()
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)

    # test with one sample
    # warning should be raised when using only 1 sample
    X_1sample = np.arange(5).reshape(1, 5)
    lw = LedoitWolf()
    assert_warns(UserWarning, lw.fit, X_1sample)
    assert_array_almost_equal(lw.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))

    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False)
    lw.fit(X)
    assert_almost_equal(lw.score(X), score_, 4)
    assert(lw.precision_ is None)
def test_ledoit_wolf_large():
    """LedoitWolf must handle data wider than block_size, and the blocked
    computation must match the unblocked one."""
    # test that ledoit_wolf doesn't error on data that is wider than block_size
    rng = np.random.RandomState(0)
    # use a number of features that is larger than the block-size
    X = rng.normal(size=(10, 20))
    lw = LedoitWolf(block_size=10).fit(X)
    # check that covariance is about diagonal (random normal noise)
    assert_almost_equal(lw.covariance_, np.eye(20), 0)
    cov = lw.covariance_

    # check that the result is consistent with not splitting data into blocks.
    lw = LedoitWolf(block_size=25).fit(X)
    assert_almost_equal(lw.covariance_, cov)
def test_oas():
    # Tests OAS module on a simple dataset.
    #
    # NOTE(review): relies on module-level fixtures ``X`` and ``n_samples``
    # defined earlier in this test file — confirm they are the toy dataset.
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    oa = OAS(assume_centered=True)
    oa.fit(X_centered)
    # Keep the centered-path results; reused below to verify the
    # non-centered path produces the same shrinkage and score.
    shrinkage_ = oa.shrinkage_
    score_ = oa.score(X_centered)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
                                                 assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0:1]
    oa = OAS(assume_centered=True)
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    # With a single centered feature the covariance is just the second moment.
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False, assume_centered=True)
    oa.fit(X_centered)
    assert_almost_equal(oa.score(X_centered), score_, 4)
    assert(oa.precision_ is None)
    # Same tests without assuming centered data--------------------------------
    # test shrinkage coeff on a simple data set
    oa = OAS()
    oa.fit(X)
    # Must match the values obtained on pre-centered data above.
    assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
    assert_almost_equal(oa.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    oa = OAS()
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
    # test with one sample
    # warning should be raised when using only 1 sample
    X_1sample = np.arange(5).reshape(1, 5)
    oa = OAS()
    assert_warns(UserWarning, oa.fit, X_1sample)
    assert_array_almost_equal(oa.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False)
    oa.fit(X)
    assert_almost_equal(oa.score(X), score_, 4)
    assert(oa.precision_ is None)
| bsd-3-clause |
sanjayankur31/nest-simulator | doc/userdoc/mock_kernel.py | 21 | 3167 | # -*- coding: utf-8 -*-
#
# mock_kernel.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Mock pynestkernel.pyx into dummy python file.
"""
import ast
import re
def has_return(ast_func):
    """Return True if *ast_func* contains a ``return`` statement anywhere.

    The walk covers every descendant node (including nested functions),
    matching the original flag-based loop, but any() short-circuits on the
    first ``Return`` instead of always walking the whole tree.
    """
    return any(isinstance(node, ast.Return) for node in ast.walk(ast_func))
def convert(infile):
    """Turn cython file into python

    Munge the cython file into parsable python and return the converted
    result as a string.

    The conversion is not correct but it can then be parsed by ast and
    thus converted to a fully mocked file with dummy classes and functions
    (either pass or return MagicMock)
    """
    res, res_tmp = "", ""
    # "cdef ..." declarations inside a class body have no Python equivalent.
    cdef_in_classes_re = re.compile(r' +cdef')
    cdef_class_re = re.compile(r'cdef class (.*)')
    # Strip C datatypes, "new", address-of operators and <casts> so the
    # remaining text parses as Python.
    rmdatatype_re = re.compile(r'\bint\b|\bnew\b|\&(?=\w)|<[^>]+>')
    inclass = False
    for line in infile:
        if "__cinit__" in line:
            line = line.replace("__cinit__", "__init__")
        if inclass is True:
            if cdef_in_classes_re.match(line):
                continue
            if not (line.startswith(" ") or line == "\n"):
                # Dedent ends the class body; fall through so this very
                # line can still start a new "cdef class".
                inclass = False
            else:
                line = rmdatatype_re.sub("", line)
                res_tmp += line
        if inclass is False:
            m = cdef_class_re.match(line)
            if m is not None:
                res_tmp += line[5:]  # remove "cdef "
                inclass = True
            else:
                if line.startswith("cdef"):
                    continue
    # res_tmp now only contains class definitions; parse them and re-emit
    # stubs whose methods either pass or return a MagicMock.
    tree = ast.parse(res_tmp)
    for klass in tree.body:
        bases = ""
        if klass.bases:
            # ", ".join handles the single-base case too.
            bases = "(" + ", ".join(k.id for k in klass.bases) + ")"
        res += "class {name}{bases}:\n".format(name=klass.name, bases=bases)
        for child in klass.body:
            if isinstance(child, ast.FunctionDef):
                # ", ".join likewise covers zero, one or many arguments.
                args = ", ".join(a.arg for a in child.args.args)
                res += "    def {name}({args}):\n".format(name=child.name, args=args)
                if has_return(child):
                    res += "        return MagicMock()\n"
                else:
                    res += "        pass\n"
        res += "\n\n"
    return res
| gpl-2.0 |
wkoathp/glance | glance/tests/functional/test_reload.py | 16 | 9235 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import time
import psutil
import requests
from glance.tests import functional
from glance.tests.utils import execute
TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../', 'var'))
def set_config_value(filepath, key, value):
    """Set 'key = value' in config file.

    Every existing assignment to *key* is replaced in place; all other
    lines are preserved unchanged.
    """
    replacement_line = '%s = %s\n' % (key, value)
    # re.escape guards against keys containing regex metacharacters, the
    # raw string avoids the invalid "\s" escape warning, and \s* also
    # matches assignments written without whitespace ("key=value"), which
    # the old "\s+" pattern missed.
    match = re.compile(r'^%s\s*=' % re.escape(key)).match
    with open(filepath, 'r+') as f:
        lines = f.readlines()
        f.seek(0, 0)
        f.truncate()
        for line in lines:
            f.write(line if not match(line) else replacement_line)
class TestReload(functional.FunctionalTest):
    """Test configuration reload"""

    def setUp(self):
        # Start with a single worker per service so the initial child
        # process count is deterministic.
        self.workers = 1
        super(TestReload, self).setUp()

    def tearDown(self):
        self.stop_servers()
        super(TestReload, self).tearDown()

    def ticker(self, message, seconds=60, tick=0.01):
        """
        Allows repeatedly testing for an expected result
        for a finite amount of time.

        :param message: Message to display on timeout
        :param seconds: Time in seconds after which we timeout
        :param tick: Time to sleep before rechecking for expected result
        :returns: 'True' or fails the test with 'message' on timeout
        """
        # We default to allowing 60 seconds timeout but
        # typically only a few hundredths of a second
        # are needed.
        num_ticks = seconds * (1.0 / tick)
        count = 0
        while count < num_ticks:
            count += 1
            time.sleep(tick)
            yield
        self.fail(message)

    def _get_children(self, server):
        """Return the set of pids of *server*'s worker child processes."""
        pid = None
        pid = self._get_parent(server)
        # NOTE(review): psutil.Process.get_children() is the pre-psutil-2.0
        # spelling of children() — confirm against the pinned psutil version.
        process = psutil.Process(pid)
        children = process.get_children()
        pids = set()
        for child in children:
            pids.add(child.pid)
        return pids

    def _get_parent(self, server):
        """Return the master process pid for the 'api' or 'registry' server."""
        if server == 'api':
            return self.api_server.process_pid
        elif server == 'registry':
            return self.registry_server.process_pid

    def _conffile(self, service):
        """Return the path of *service*'s config file in the test directory."""
        conf_dir = os.path.join(self.test_dir, 'etc')
        conf_filepath = os.path.join(conf_dir, '%s.conf' % service)
        return conf_filepath

    def _url(self, protocol, path):
        """Build a URL for *path* on the local API server."""
        return '%s://127.0.0.1:%d%s' % (protocol, self.api_port, path)

    def test_reload(self):
        """Test SIGHUP picks up new config values"""
        def check_pids(pre, post=None, workers=2):
            # Without ``post``: succeed once ``pre`` holds ``workers`` pids.
            # With ``post``: additionally require an entirely new pid set,
            # proving the old workers were replaced.
            if post is None:
                if len(pre) == workers:
                    return True
                else:
                    return False
            if len(post) == workers:
                # Check new children have different pids
                if post.intersection(pre) == set():
                    return True
            return False
        self.api_server.fork_socket = False
        self.registry_server.fork_socket = False
        self.start_servers(fork_socket=False, **vars(self))

        pre_pids = {}
        post_pids = {}

        # Test changing the workers value creates all new children
        # This recycles the existing socket
        msg = 'Start timeout'
        for _ in self.ticker(msg):
            for server in ('api', 'registry'):
                pre_pids[server] = self._get_children(server)
            if check_pids(pre_pids['api'], workers=1):
                if check_pids(pre_pids['registry'], workers=1):
                    break

        for server in ('api', 'registry'):
            # Labour costs have fallen
            set_config_value(self._conffile(server), 'workers', '2')
            cmd = "kill -HUP %s" % self._get_parent(server)
            execute(cmd, raise_error=True)

        msg = 'Worker change timeout'
        for _ in self.ticker(msg):
            for server in ('api', 'registry'):
                post_pids[server] = self._get_children(server)
            if check_pids(pre_pids['registry'], post_pids['registry']):
                if check_pids(pre_pids['api'], post_pids['api']):
                    break

        # Test changing from http to https
        # This recycles the existing socket
        path = self._url('http', '/')
        response = requests.get(path)
        self.assertEqual(300, response.status_code)
        del response  # close socket so that process audit is reliable

        pre_pids['api'] = self._get_children('api')
        key_file = os.path.join(TEST_VAR_DIR, 'privatekey.key')
        set_config_value(self._conffile('api'), 'key_file', key_file)
        cert_file = os.path.join(TEST_VAR_DIR, 'certificate.crt')
        set_config_value(self._conffile('api'), 'cert_file', cert_file)
        cmd = "kill -HUP %s" % self._get_parent('api')
        execute(cmd, raise_error=True)
        msg = 'http to https timeout'
        for _ in self.ticker(msg):
            post_pids['api'] = self._get_children('api')
            if check_pids(pre_pids['api'], post_pids['api']):
                break
        ca_file = os.path.join(TEST_VAR_DIR, 'ca.crt')
        path = self._url('https', '/')
        response = requests.get(path, verify=ca_file)
        self.assertEqual(300, response.status_code)
        del response

        # Test https restart
        # This recycles the existing socket
        pre_pids['api'] = self._get_children('api')
        cmd = "kill -HUP %s" % self._get_parent('api')
        execute(cmd, raise_error=True)
        msg = 'https restart timeout'
        for _ in self.ticker(msg):
            post_pids['api'] = self._get_children('api')
            if check_pids(pre_pids['api'], post_pids['api']):
                break
        ca_file = os.path.join(TEST_VAR_DIR, 'ca.crt')
        path = self._url('https', '/')
        response = requests.get(path, verify=ca_file)
        self.assertEqual(300, response.status_code)
        del response

        # Test changing the https bind_host
        # This requires a new socket
        pre_pids['api'] = self._get_children('api')
        set_config_value(self._conffile('api'), 'bind_host', '127.0.0.1')
        cmd = "kill -HUP %s" % self._get_parent('api')
        execute(cmd, raise_error=True)
        msg = 'https bind_host timeout'
        for _ in self.ticker(msg):
            post_pids['api'] = self._get_children('api')
            if check_pids(pre_pids['api'], post_pids['api']):
                break
        path = self._url('https', '/')
        response = requests.get(path, verify=ca_file)
        self.assertEqual(300, response.status_code)
        del response

        # Test https -> http
        # This recycles the existing socket
        pre_pids['api'] = self._get_children('api')
        set_config_value(self._conffile('api'), 'key_file', '')
        set_config_value(self._conffile('api'), 'cert_file', '')
        cmd = "kill -HUP %s" % self._get_parent('api')
        execute(cmd, raise_error=True)
        msg = 'https to http timeout'
        for _ in self.ticker(msg):
            post_pids['api'] = self._get_children('api')
            if check_pids(pre_pids['api'], post_pids['api']):
                break
        path = self._url('http', '/')
        response = requests.get(path)
        self.assertEqual(300, response.status_code)
        del response

        # Test changing the http bind_host
        # This requires a new socket
        pre_pids['api'] = self._get_children('api')
        set_config_value(self._conffile('api'), 'bind_host', '127.0.0.1')
        cmd = "kill -HUP %s" % self._get_parent('api')
        execute(cmd, raise_error=True)
        msg = 'http bind_host timeout'
        for _ in self.ticker(msg):
            post_pids['api'] = self._get_children('api')
            if check_pids(pre_pids['api'], post_pids['api']):
                break
        path = self._url('http', '/')
        response = requests.get(path)
        self.assertEqual(300, response.status_code)
        del response

        # Test logging configuration change
        # This recycles the existing socket
        conf_dir = os.path.join(self.test_dir, 'etc')
        log_file = conf_dir + 'new.log'
        self.assertFalse(os.path.exists(log_file))
        set_config_value(self._conffile('api'), 'log_file', log_file)
        cmd = "kill -HUP %s" % self._get_parent('api')
        execute(cmd, raise_error=True)
        msg = 'No new log file created'
        for _ in self.ticker(msg):
            if os.path.exists(log_file):
                break
| apache-2.0 |
aferr/TimingCompartments | configs/ruby/Ruby.py | 7 | 8628 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
def define_options(parser):
    """Register Ruby-specific command line options on *parser*.

    Also delegates to the coherence-protocol module named by
    buildEnv['PROTOCOL'] so each protocol can add its own options.
    """
    # By default, ruby uses the simple timing cpu
    parser.set_defaults(cpu_type="timing")

    # ruby network options
    parser.add_option("--topology", type="string", default="Crossbar",
                      help="check src/mem/ruby/network/topologies for complete set")
    parser.add_option("--mesh-rows", type="int", default=1,
                      help="the number of rows in the mesh topology")
    parser.add_option("--garnet-network", type="string", default=None,
                      help="'fixed'|'flexible'")
    parser.add_option("--network-fault-model", action="store_true", default=False,
                      help="enable network fault model: see src/mem/ruby/network/fault_model/")

    # ruby mapping options
    parser.add_option("--numa-high-bit", type="int", default=0,
                      help="high order address bit to use for numa mapping. " \
                           "0 = highest bit, not specified = lowest bit")

    # ruby sparse memory options
    parser.add_option("--use-map", action="store_true", default=False)
    parser.add_option("--map-levels", type="int", default=4)

    parser.add_option("--recycle-latency", type="int", default=10,
                      help="Recycle latency for ruby controller input buffers")

    parser.add_option("--random_seed", type="int", default=1234,
                      help="Used for seeding the random number generator")

    parser.add_option("--ruby_stats", type="string", default="ruby.stats")

    # Import the protocol's config module by name and let it register
    # its own options (Python 2 exec statement).
    protocol = buildEnv['PROTOCOL']
    exec "import %s" % protocol
    eval("%s.define_options(parser)" % protocol)
def create_topology(controllers, options):
    """ Called from create_system in configs/ruby/<protocol>.py
        Must return an object which is a subclass of BaseTopology
        found in configs/topologies/BaseTopology.py
        This is a wrapper for the legacy topologies.
    """
    # Dynamically import the topology module named on the command line
    # and instantiate the class of the same name (Python 2 exec statement).
    exec "import %s as Topo" % options.topology
    topology = eval("Topo.%s(controllers)" % options.topology)
    return topology
def create_system(options, system, piobus = None, dma_ports = []):
system.ruby = RubySystem(clock = options.clock,
stats_filename = options.ruby_stats,
no_mem_vec = options.use_map)
ruby = system.ruby
protocol = buildEnv['PROTOCOL']
exec "import %s" % protocol
try:
(cpu_sequencers, dir_cntrls, topology) = \
eval("%s.create_system(options, system, piobus, dma_ports, ruby)"
% protocol)
except:
print "Error: could not create sytem for ruby protocol %s" % protocol
raise
# Create a port proxy for connecting the system port. This is
# independent of the protocol and kept in the protocol-agnostic
# part (i.e. here).
sys_port_proxy = RubyPortProxy(ruby_system = ruby)
# Give the system port proxy a SimObject parent without creating a
# full-fledged controller
system.sys_port_proxy = sys_port_proxy
# Connect the system port for loading of binaries etc
system.system_port = system.sys_port_proxy.slave
#
# Set the network classes based on the command line options
#
if options.garnet_network == "fixed":
class NetworkClass(GarnetNetwork_d): pass
class IntLinkClass(GarnetIntLink_d): pass
class ExtLinkClass(GarnetExtLink_d): pass
class RouterClass(GarnetRouter_d): pass
elif options.garnet_network == "flexible":
class NetworkClass(GarnetNetwork): pass
class IntLinkClass(GarnetIntLink): pass
class ExtLinkClass(GarnetExtLink): pass
class RouterClass(GarnetRouter): pass
else:
class NetworkClass(SimpleNetwork): pass
class IntLinkClass(SimpleIntLink): pass
class ExtLinkClass(SimpleExtLink): pass
class RouterClass(BasicRouter): pass
#
# Important: the topology must be instantiated before the network and after
# the controllers. Hence the separation between topology definition and
# instantiation.
#
# gem5 SimObject defined in src/mem/ruby/network/Network.py
net_topology = Topology()
net_topology.description = topology.description
routers, int_links, ext_links = topology.makeTopology(options,
IntLinkClass, ExtLinkClass, RouterClass)
net_topology.routers = routers
net_topology.int_links = int_links
net_topology.ext_links = ext_links
if options.network_fault_model:
assert(options.garnet_network == "fixed")
fault_model = FaultModel()
network = NetworkClass(ruby_system = ruby, topology = net_topology,\
enable_fault_model=True, fault_model = fault_model)
else:
network = NetworkClass(ruby_system = ruby, topology = net_topology)
#
# Loop through the directory controlers.
# Determine the total memory size of the ruby system and verify it is equal
# to physmem. However, if Ruby memory is using sparse memory in SE
# mode, then the system should not back-up the memory state with
# the Memory Vector and thus the memory size bytes should stay at 0.
# Also set the numa bits to the appropriate values.
#
total_mem_size = MemorySize('0B')
dir_bits = int(math.log(options.num_dirs, 2))
if options.numa_high_bit:
numa_bit = options.numa_high_bit
else:
# if not specified, use the lowest bits above the block offest
if dir_bits > 0:
# add 5 because bits 0-5 are the block offset
numa_bit = dir_bits + 5
else:
numa_bit = 6
for dir_cntrl in dir_cntrls:
total_mem_size.value += dir_cntrl.directory.size.value
dir_cntrl.directory.numa_high_bit = numa_bit
phys_mem_size = 0
for mem in system.memories.unproxy(system):
phys_mem_size += long(mem.range.second) - long(mem.range.first) + 1
assert(total_mem_size.value == phys_mem_size)
ruby_profiler = RubyProfiler(ruby_system = ruby,
num_of_sequencers = len(cpu_sequencers))
ruby.network = network
ruby.profiler = ruby_profiler
ruby.mem_size = total_mem_size
ruby._cpu_ruby_ports = cpu_sequencers
ruby.random_seed = options.random_seed
| bsd-3-clause |
DePierre/owtf | plugins/web/semi_passive/Spiders_Robots_and_Crawlers@OWTF-IG-001.py | 3 | 1132 | """
Robots.txt semi-passive plugin, parses robots.txt file to generate on-screen
links and save them for later spidering and analysis
"""
from framework.utils import OWTFLogger
from framework.dependency_management.dependency_resolver import ServiceLocator
DESCRIPTION = "Normal request for robots.txt analysis"
def run(PluginInfo):
    """Fetch the target's robots.txt and produce plugin output.

    When robots.txt exists, its body is parsed for links; otherwise the
    miss is logged and a transaction table for the URL is returned.
    """
    helper = ServiceLocator.get_component("plugin_helper")
    target = ServiceLocator.get_component("target")
    requester = ServiceLocator.get_component("requester")
    base_url = target.Get('top_url')
    robots_url = base_url + "/robots.txt"
    # Use transaction cache if possible for speed
    transaction = requester.GetTransaction(True, robots_url, "GET")
    if transaction is not None and transaction.Found:
        output = helper.ProcessRobots(
            PluginInfo,
            transaction.GetRawResponseBody(),
            base_url,
            '')
    else:  # robots.txt NOT found
        OWTFLogger.log("robots.txt was NOT found")
        output = helper.TransactionTableForURLList(True, [robots_url])
    # Equivalent to accumulating into an initially-empty result list.
    return list(output)
| bsd-3-clause |
Intel-Corporation/tensorflow | tensorflow/contrib/eager/python/examples/revnet/resnet_preprocessing.py | 29 | 7103 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ImageNet preprocessing for ResNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
IMAGE_SIZE = 224
CROP_PADDING = 32
def distorted_bounding_box_crop(image_bytes,
                                bbox,
                                min_object_covered=0.1,
                                aspect_ratio_range=(0.75, 1.33),
                                area_range=(0.05, 1.0),
                                max_attempts=100,
                                scope=None):
  """Decode a random crop of `image_bytes` distorted around one of `bbox`.

  Thin wrapper over `tf.image.sample_distorted_bounding_box`; see its
  documentation for the sampling semantics.

  Args:
    image_bytes: `Tensor` of binary (JPEG) image data.
    bbox: `Tensor` of bounding boxes `[1, num_boxes, 4]` with coordinates
      `[ymin, xmin, ymax, xmax]` in [0, 1). Zero boxes means whole image.
    min_object_covered: minimum fraction of any supplied box the crop must
      cover (default 0.1).
    aspect_ratio_range: allowed width/height range of the crop.
    area_range: allowed fraction of the image area the crop may cover.
    max_attempts: sampling attempts before falling back to the full image.
    scope: optional name scope string.

  Returns:
    The cropped image `Tensor`.
  """
  with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]):
    jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
    sampled_box = tf.image.sample_distorted_bounding_box(
        jpeg_shape,
        bounding_boxes=bbox,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=True)
    box_begin, box_size, _ = sampled_box

    # Build the [y, x, height, width] crop window and decode only that
    # region of the JPEG.
    top, left, _ = tf.unstack(box_begin)
    crop_height, crop_width, _ = tf.unstack(box_size)
    crop_window = tf.stack([top, left, crop_height, crop_width])
    return tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
def _at_least_x_are_equal(a, b, x):
  """Return a boolean `Tensor`: `a` and `b` agree in at least `x` places."""
  matches = tf.cast(tf.equal(a, b), tf.int32)
  return tf.greater_equal(tf.reduce_sum(matches), x)
def _decode_and_random_crop(image_bytes, image_size):
  """Decode a random crop and resize it to `image_size` x `image_size`.

  When the sampled crop degenerates to (essentially) the whole image,
  falls back to the deterministic center crop.
  """
  whole_image_bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
                                 dtype=tf.float32, shape=[1, 1, 4])
  cropped = distorted_bounding_box_crop(
      image_bytes,
      whole_image_bbox,
      min_object_covered=0.1,
      aspect_ratio_range=(3. / 4, 4. / 3.),
      area_range=(0.08, 1.0),
      max_attempts=10,
      scope=None)
  original_shape = tf.image.extract_jpeg_shape(image_bytes)
  # The crop "failed" if the sampler returned the full image.
  crop_failed = _at_least_x_are_equal(original_shape, tf.shape(cropped), 3)

  return tf.cond(
      crop_failed,
      lambda: _decode_and_center_crop(image_bytes, image_size),
      lambda: tf.image.resize_bicubic([cropped],  # pylint: disable=g-long-lambda
                                      [image_size, image_size])[0])
def _decode_and_center_crop(image_bytes, image_size):
  """Decode the padded central square and resize to `image_size` squared."""
  jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
  height = jpeg_shape[0]
  width = jpeg_shape[1]

  # Side of the central square, scaled down so that after resizing there is
  # an effective CROP_PADDING margin around the crop.
  crop_size = tf.cast(
      ((image_size / (image_size + CROP_PADDING)) *
       tf.cast(tf.minimum(height, width), tf.float32)),
      tf.int32)

  top = ((height - crop_size) + 1) // 2
  left = ((width - crop_size) + 1) // 2
  crop_window = tf.stack([top, left, crop_size, crop_size])
  cropped = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
  return tf.image.resize_bicubic([cropped], [image_size, image_size])[0]
def _flip(image):
  """Flip `image` horizontally with probability 0.5."""
  return tf.image.random_flip_left_right(image)
def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE):
  """Preprocesses the given image for training.

  Applies a random distorted crop and a random horizontal flip, then
  reshapes and converts the image to the requested dtype.
  (The docstring previously said "for evaluation" — a copy-paste error.)

  Args:
    image_bytes: `Tensor` representing an image binary of arbitrary size.
    use_bfloat16: `bool` for whether to use bfloat16.
    image_size: image size.

  Returns:
    A preprocessed image `Tensor`.
  """
  image = _decode_and_random_crop(image_bytes, image_size)
  image = _flip(image)
  image = tf.reshape(image, [image_size, image_size, 3])
  image = tf.image.convert_image_dtype(
      image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
  return image
def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE):
  """Deterministic evaluation preprocessing: center crop, reshape, cast.

  Args:
    image_bytes: `Tensor` representing an image binary of arbitrary size.
    use_bfloat16: `bool` for whether to use bfloat16.
    image_size: image size.

  Returns:
    A preprocessed image `Tensor`.
  """
  cropped = _decode_and_center_crop(image_bytes, image_size)
  reshaped = tf.reshape(cropped, [image_size, image_size, 3])
  target_dtype = tf.bfloat16 if use_bfloat16 else tf.float32
  return tf.image.convert_image_dtype(reshaped, dtype=target_dtype)
def preprocess_image(image_bytes,
                     is_training=False,
                     use_bfloat16=False,
                     image_size=IMAGE_SIZE):
  """Dispatch to the training or evaluation preprocessing pipeline.

  Args:
    image_bytes: `Tensor` representing an image binary of arbitrary size.
    is_training: `bool` for whether the preprocessing is for training.
    use_bfloat16: `bool` for whether to use bfloat16.
    image_size: image size.

  Returns:
    A preprocessed image `Tensor`.
  """
  preprocess_fn = preprocess_for_train if is_training else preprocess_for_eval
  return preprocess_fn(image_bytes, use_bfloat16, image_size)
| apache-2.0 |
waynecoulson/TV-Show-Downloader | lib/hachoir_parser/program/exe.py | 90 | 8701 | """
Microsoft Windows Portable Executable (PE) file parser.
Informations:
- Microsoft Portable Executable and Common Object File Format Specification:
http://www.microsoft.com/whdc/system/platform/firmware/PECOFF.mspx
Author: Victor Stinner
Creation date: 2006-08-13
"""
from lib.hachoir_parser import HachoirParser
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.field import (FieldSet, RootSeekableFieldSet,
UInt16, UInt32, String,
RawBytes, PaddingBytes)
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_parser.program.exe_ne import NE_Header
from lib.hachoir_parser.program.exe_pe import PE_Header, PE_OptHeader, SectionHeader
from lib.hachoir_parser.program.exe_res import PE_Resource, NE_VersionInfoNode
MAX_NB_SECTION = 50
class MSDosHeader(FieldSet):
    """Legacy 64-byte MS-DOS ("MZ") executable header."""
    # The DOS header is always exactly 64 bytes.
    static_size = 64*8

    def createFields(self):
        """Yield the fixed sequence of MZ header fields, in file order."""
        yield String(self, "header", 2, "File header (MZ)", charset="ASCII")
        yield UInt16(self, "size_mod_512", "File size in bytes modulo 512")
        yield UInt16(self, "size_div_512", "File size in bytes divide by 512")
        yield UInt16(self, "reloc_entries", "Number of relocation entries")
        yield UInt16(self, "code_offset", "Offset to the code in the file (divided by 16)")
        yield UInt16(self, "needed_memory", "Memory needed to run (divided by 16)")
        yield UInt16(self, "max_memory", "Maximum memory needed to run (divided by 16)")
        yield textHandler(UInt32(self, "init_ss_sp", "Initial value of SP:SS registers"), hexadecimal)
        yield UInt16(self, "checksum", "Checksum")
        yield textHandler(UInt32(self, "init_cs_ip", "Initial value of CS:IP registers"), hexadecimal)
        yield UInt16(self, "reloc_offset", "Offset in file to relocation table")
        yield UInt16(self, "overlay_number", "Overlay number")
        yield PaddingBytes(self, "reserved[]", 8, "Reserved")
        yield UInt16(self, "oem_id", "OEM id")
        yield UInt16(self, "oem_info", "OEM info")
        yield PaddingBytes(self, "reserved[]", 20, "Reserved")
        yield UInt32(self, "next_offset", "Offset to next header (PE or NE)")

    def isValid(self):
        """Return "" when the header looks sane, else an error message."""
        if 512 <= self["size_mod_512"].value:
            return "Invalid field 'size_mod_512' value"
        if self["code_offset"].value < 4:
            return "Invalid code offset"
        # A tiny size_div_512 suggests a PE-style DOS stub; apply the
        # stricter checks only in that case.
        looks_pe = self["size_div_512"].value < 4
        if looks_pe:
            if self["checksum"].value != 0:
                return "Invalid value of checksum"
            if not (80 <= self["next_offset"].value <= 1024):
                return "Invalid value of next_offset"
        return ""
class ExeFile(HachoirParser, RootSeekableFieldSet):
PARSER_TAGS = {
"id": "exe",
"category": "program",
"file_ext": ("exe", "dll", "ocx"),
"mime": (u"application/x-dosexec",),
"min_size": 64*8,
#"magic": (("MZ", 0),),
"magic_regex": (("MZ.[\0\1].{4}[^\0\1\2\3]", 0),),
"description": "Microsoft Windows Portable Executable"
}
endian = LITTLE_ENDIAN
    def __init__(self, stream, **args):
        # The seekable field-set must be initialized first so the parser
        # machinery (validate/createFields) can address the stream.
        RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
        HachoirParser.__init__(self, stream, **args)
def validate(self):
if self.stream.readBytes(0, 2) != 'MZ':
return "Wrong header"
err = self["msdos"].isValid()
if err:
return "Invalid MSDOS header: "+err
if self.isPE():
if MAX_NB_SECTION < self["pe_header/nb_section"].value:
return "Invalid number of section (%s)" \
% self["pe_header/nb_section"].value
return True
    def createFields(self):
        """Parse the MS-DOS header, then the PE or NE part when present."""
        yield MSDosHeader(self, "msdos", "MS-DOS program header")

        if self.isPE() or self.isNE():
            offset = self["msdos/next_offset"].value
            self.seekByte(offset, relative=False)

        if self.isPE():
            for field in self.parsePortableExecutable():
                yield field
        elif self.isNE():
            for field in self.parseNE_Executable():
                yield field
        else:
            # Plain MS-DOS program: jump straight to its code segment
            # (code_offset is expressed in 16-byte paragraphs).
            offset = self["msdos/code_offset"].value * 16
            self.seekByte(offset, relative=False)
    def parseNE_Executable(self):
        """Yield the NE header, then try to locate embedded version info."""
        yield NE_Header(self, "ne_header")

        # FIXME: Compute resource offset instead of using searchBytes()
        # Ugly hack to get find version info structure
        start = self.current_size
        addr = self.stream.searchBytes('VS_VERSION_INFO', start)
        if addr:
            # Back up 32 bits to include the structure header before the key.
            self.seekBit(addr-32)
            yield NE_VersionInfoNode(self, "info")
def parsePortableExecutable(self):
# Read PE header
yield PE_Header(self, "pe_header")
# Read PE optional header
size = self["pe_header/opt_hdr_size"].value
rsrc_rva = None
if size:
yield PE_OptHeader(self, "pe_opt_header", size=size*8)
if "pe_opt_header/resource/rva" in self:
rsrc_rva = self["pe_opt_header/resource/rva"].value
# Read section headers
sections = []
for index in xrange(self["pe_header/nb_section"].value):
section = SectionHeader(self, "section_hdr[]")
yield section
if section["phys_size"].value:
sections.append(section)
# Read sections
sections.sort(key=lambda field: field["phys_off"].value)
for section in sections:
self.seekByte(section["phys_off"].value)
size = section["phys_size"].value
if size:
name = section.createSectionName()
if rsrc_rva is not None and section["rva"].value == rsrc_rva:
yield PE_Resource(self, name, section, size=size*8)
else:
yield RawBytes(self, name, size)
def isPE(self):
if not hasattr(self, "_is_pe"):
self._is_pe = False
offset = self["msdos/next_offset"].value * 8
if 2*8 <= offset \
and (offset+PE_Header.static_size) <= self.size \
and self.stream.readBytes(offset, 4) == 'PE\0\0':
self._is_pe = True
return self._is_pe
def isNE(self):
if not hasattr(self, "_is_ne"):
self._is_ne = False
offset = self["msdos/next_offset"].value * 8
if 64*8 <= offset \
and (offset+NE_Header.static_size) <= self.size \
and self.stream.readBytes(offset, 2) == 'NE':
self._is_ne = True
return self._is_ne
def getResource(self):
# MS-DOS program: no resource
if not self.isPE():
return None
# Check if PE has resource or not
if "pe_opt_header/resource/size" in self:
if not self["pe_opt_header/resource/size"].value:
return None
if "section_rsrc" in self:
return self["section_rsrc"]
return None
def createDescription(self):
if self.isPE():
if self["pe_header/is_dll"].value:
text = u"Microsoft Windows DLL"
else:
text = u"Microsoft Windows Portable Executable"
info = [self["pe_header/cpu"].display]
if "pe_opt_header" in self:
hdr = self["pe_opt_header"]
info.append(hdr["subsystem"].display)
if self["pe_header/is_stripped"].value:
info.append(u"stripped")
return u"%s: %s" % (text, ", ".join(info))
elif self.isNE():
return u"New-style Executable (NE) for Microsoft MS Windows 3.x"
else:
return u"MS-DOS executable"
def createContentSize(self):
if self.isPE():
size = 0
for index in xrange(self["pe_header/nb_section"].value):
section = self["section_hdr[%u]" % index]
section_size = section["phys_size"].value
if not section_size:
continue
section_size = (section_size + section["phys_off"].value) * 8
if size:
size = max(size, section_size)
else:
size = section_size
if size:
return size
else:
return None
elif self.isNE():
# TODO: Guess NE size
return None
else:
size = self["msdos/size_mod_512"].value + (self["msdos/size_div_512"].value-1) * 512
if size < 0:
return None
return size*8
| gpl-3.0 |
rcosnita/fantastico-todo | todo/frontend/models/tasks.py | 1 | 2113 | '''
Copyright 2013 Cosnita Radu Viorel
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.. codeauthor:: Radu Viorel Cosnita <radu.cosnita@gmail.com>
.. py:module:: todo.models.tasks
'''
from fantastico.mvc import BASEMODEL
from fantastico.roa.resource_decorator import Resource
from sqlalchemy.schema import Column
from sqlalchemy.types import Integer, String, Text, SmallInteger
from todo.frontend.validators.task_validator import TaskValidator
@Resource(name="Task", url="/tasks", validator=TaskValidator)
class Task(BASEMODEL):
    '''Declarative model describing a single todo task, published as the
    ROA resource **Task** under ``/tasks`` and validated by
    :class:`TaskValidator`.'''

    __tablename__ = "tasks"

    # Surrogate primary key, assigned by the database.
    task_id = Column("task_id", Integer, primary_key=True, autoincrement=True)
    # Short human readable title (mandatory).
    name = Column("name", String(200), nullable=False)
    # Optional free-form details about the task.
    description = Column("description", Text)
    # Numeric completion state; new tasks start at 0.
    status = Column("status", SmallInteger, nullable=False)
    # Identifier of the owning user (mandatory).
    userid = Column("userid", String(200), nullable=False)

    def __init__(self, name=None, description=None, status=0, userid=None):
        '''Populate a new task instance; every argument is optional and
        defaults to an empty, not-yet-started task.'''
        self.userid = userid
        self.status = status
        self.description = description
        self.name = name
| mit |
twiest/openshift-ansible | roles/lib_openshift/library/oc_adm_router.py | 2 | 110817 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import fcntl
import json
import time
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/router -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_adm_router
short_description: Module to manage openshift router
description:
- Manage openshift router programmatically.
options:
state:
description:
- Whether to create or delete the router
- present - create the router
- absent - remove the router
- list - return the current representation of a router
required: false
default: present
choices:
- present
- absent
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- The name of the router
required: false
default: router
aliases: []
namespace:
description:
- The namespace where to manage the router.
required: false
default: default
aliases: []
images:
description:
- The image to base this router on - ${component} will be replaced with --type
required: false
default: 'openshift3/ose-${component}:${version}'
aliases: []
latest_images:
description:
- If true, attempt to use the latest image for the router instead of the latest release.
required: false
default: False
aliases: []
labels:
description:
- A set of labels to uniquely identify the router and its components.
required: false
default: None
aliases: []
ports:
description:
- A list of strings in the 'port:port' format
required: False
default:
- 80:80
- 443:443
aliases: []
replicas:
description:
- The replication factor of the router; commonly 2 when high availability is desired.
required: False
default: 1
aliases: []
selector:
description:
- Selector used to filter nodes on deployment. Used to run routers on a specific set of nodes.
required: False
default: None
aliases: []
service_account:
description:
- Name of the service account to use to run the router pod.
required: False
default: router
aliases: []
router_type:
description:
- The router image to use - if you specify --images this flag may be ignored.
required: false
default: haproxy-router
aliases: []
external_host:
description:
- If the underlying router implementation connects with an external host, this is the external host's hostname.
required: false
default: None
aliases: []
external_host_vserver:
description:
- If the underlying router implementation uses virtual servers, this is the name of the virtual server for HTTP connections.
required: false
default: None
aliases: []
external_host_insecure:
description:
- If the underlying router implementation connects with an external host
- over a secure connection, this causes the router to skip strict certificate verification with the external host.
required: false
default: False
aliases: []
external_host_partition_path:
description:
- If the underlying router implementation uses partitions for control boundaries, this is the path to use for that partition.
required: false
default: None
aliases: []
external_host_username:
description:
- If the underlying router implementation connects with an external host, this is the username for authenticating with the external host.
required: false
default: None
aliases: []
external_host_password:
description:
- If the underlying router implementation connects with an external host, this is the password for authenticating with the external host.
required: false
default: None
aliases: []
external_host_private_key:
description:
- If the underlying router implementation requires an SSH private key, this is the path to the private key file.
required: false
default: None
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment:
- There are some exceptions to note when doing the idempotency in this module.
- The strategy is to use the oc adm router command to generate a default
- configuration when creating or updating a router. Oftentimes there are
- differences between the generated template and what is in memory in openshift.
- We make exceptions to not check these specific values when comparing objects.
- Here are a list of exceptions:
- - DeploymentConfig:
- dnsPolicy
- terminationGracePeriodSeconds
- restartPolicy
- timeoutSeconds
- livenessProbe
- readinessProbe
- terminationMessagePath
- hostPort
- defaultMode
- Service:
- portalIP
- clusterIP
- sessionAffinity
- type
- ServiceAccount:
- secrets
- imagePullSecrets
'''
EXAMPLES = '''
- name: create routers
oc_adm_router:
name: router
service_account: router
replicas: 2
namespace: default
selector: type=infra
cert_file: /etc/origin/master/named_certificates/router.crt
key_file: /etc/origin/master/named_certificates/router.key
cacert_file: /etc/origin/master/named_certificates/router.ca
edits:
- key: spec.strategy.rollingParams
value:
intervalSeconds: 1
maxSurge: 50%
maxUnavailable: 50%
timeoutSeconds: 600
updatePeriodSeconds: 1
action: put
- key: spec.template.spec.containers[0].resources.limits.memory
value: 2G
action: put
- key: spec.template.spec.containers[0].resources.requests.memory
value: 1G
action: put
- key: spec.template.spec.containers[0].env
value:
name: EXTENDED_VALIDATION
value: 'false'
action: update
register: router_out
run_once: True
'''
# -*- -*- -*- End included fragment: doc/router -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception):  # pragma: no cover
    '''Raised when a Yedit operation cannot be carried out.'''
# pylint: disable=too-many-public-methods,too-many-instance-attributes
class Yedit(object):  # pragma: no cover
    ''' Class to modify yaml files.

    Keys use dotted notation (configurable separator), e.g. ``a.b[0].c``.
    Mutating methods return ``(changed, yaml_dict)`` tuples. Content can be
    loaded from a file (``filename``) or supplied in memory (``content``),
    as yaml or json. ruamel.yaml round-trip load/dump is used when
    available (to preserve formatting), with a pyyaml fallback.
    '''
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
    com_sep = set(['.', '#', '|', ':'])

    # pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup_ext=None,
                 backup=False):
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        # Default backup suffix is a timestamp so repeated writes never
        # clobber an earlier backup.
        if backup_ext is None:
            self.backup_ext = ".{}".format(time.strftime("%Y%m%dT%H%M%S"))
        else:
            self.backup_ext = backup_ext
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}

    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator

    @separator.setter
    def separator(self, inc_sep):
        ''' setter method for separator '''
        self._separator = inc_sep

    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value

    @staticmethod
    def parse_key(key, sep='.'):
        '''parse the key allowing the appropriate separator.

        Returns a list of (array_index, dict_key) tuples, one of which is
        an empty string per element.'''
        common_separators = list(Yedit.com_sep - set([sep]))
        return re.findall(Yedit.re_key.format(''.join(common_separators)), key)

    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key'''
        # NOTE(review): re_valid_key contains a '%s' placeholder but is
        # combined via str.format here, so the extra separators are not
        # actually injected into the pattern — confirm intended upstream.
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
            return False

        return True

    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def remove_entry(data, key, index=None, value=None, sep='.'):
        ''' remove data at location key.

        With key == '' operates on the root container: remove a dict key
        (value), a list item by value or index, or clear the container.
        Returns True on success, False/None when nothing was removed.'''
        if key == '' and isinstance(data, dict):
            if value is not None:
                data.pop(value)
            elif index is not None:
                raise YeditException("remove_entry for a dictionary does not have an index {}".format(index))
            else:
                data.clear()

            return True

        elif key == '' and isinstance(data, list):
            ind = None
            if value is not None:
                try:
                    ind = data.index(value)
                except ValueError:
                    return False
            elif index is not None:
                ind = index
            else:
                del data[:]

            if ind is not None:
                data.pop(ind)

            return True

        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None

        # Walk down to the parent of the entry being removed.
        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True

    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a#b
            return c

        Intermediate dicts are created as needed; missing list indexes
        raise YeditException. Returns the value written, or None for an
        invalid key.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))

                # Auto-vivify missing intermediate dictionaries.
                data[dict_key] = {}
                data = data[dict_key]

            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding to object at path: {}".format(key))

        return data

    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a.b
            return c

        Returns None when the path cannot be resolved.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        return data

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        # Write to a temp file under an exclusive lock, then rename into
        # place so readers never observe a partially-written file.
        tmp_filename = filename + '.yedit'

        with open(tmp_filename, 'w') as yfd:
            fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            yfd.write(contents)
            fcntl.flock(yfd, fcntl.LOCK_UN)

        os.rename(tmp_filename, filename)

    def write(self):
        ''' write to file.

        Serializes yaml_dict as yaml or json (per content_type) and
        returns (True, yaml_dict). Optionally backs up the old file.'''
        if not self.filename:
            raise YeditException('Please specify a filename.')

        if self.backup and self.file_exists():
            shutil.copy(self.filename, '{}{}'.format(self.filename, self.backup_ext))

        # Try to set format attributes if supported
        try:
            self.yaml_dict.fa.set_block_style()
        except AttributeError:
            pass

        # Try to use RoundTripDumper if supported.
        if self.content_type == 'yaml':
            try:
                Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
            except AttributeError:
                Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
        elif self.content_type == 'json':
            Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True))
        else:
            raise YeditException('Unsupported content_type: {}.'.format(self.content_type) +
                                 'Please specify a content_type of yaml or json.')

        return (True, self.yaml_dict)

    def read(self):
        ''' read from file; returns the raw text or None if absent '''
        # check if it exists
        if self.filename is None or not self.file_exists():
            return None

        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()

        return contents

    def file_exists(self):
        ''' return whether file exists '''
        if os.path.exists(self.filename):
            return True

        return False

    def load(self, content_type='yaml'):
        ''' return yaml file.

        Populates self.yaml_dict from in-memory content (dict or string)
        or from the file on disk, parsing as yaml or json.'''
        contents = self.read()

        if not contents and not self.content:
            return None

        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content

        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass

                # Try to use RoundTripLoader if supported.
                try:
                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
                except AttributeError:
                    self.yaml_dict = yaml.safe_load(contents)

                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass

            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. {}'.format(err))

        return self.yaml_dict

    def get(self, key):
        ''' get a specified key; returns None when the path is missing '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
        except KeyError:
            entry = None

        return entry

    def pop(self, path, key_or_item):
        ''' remove a key, value pair from a dict or an item for a list.

        Returns (changed, yaml_dict).'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)

        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)

            entry.pop(ind)
            return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    def delete(self, path, index=None, value=None):
        ''' remove path from a dict; returns (changed, yaml_dict) '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator)
        if not result:
            return (False, self.yaml_dict)

        return (True, self.yaml_dict)

    def exists(self, path, value):
        ''' check if value exists at path.

        For a list entry: membership test. For a dict entry: dict subset
        match (when value is a dict) or key membership. Otherwise direct
        equality.'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, list):
            if value in entry:
                return True
            return False

        elif isinstance(entry, dict):
            if isinstance(value, dict):
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval

            return value in entry

        return entry == value

    def append(self, path, value):
        '''append value to a list; the list is created when missing.

        Returns (changed, yaml_dict).'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)

        # AUDIT:maybe-no-member makes sense due to loading data from
        # a serialized format.
        # pylint: disable=maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)

    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' put path, value into a dict.

        For a dict entry, merges value (must be a dict). For a list entry,
        replaces the item located by curr_value/index, or appends the
        value when absent. Returns (changed, yaml_dict).'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
                                     'value=[{}] type=[{}]'.format(value, type(value)))

            entry.update(value)
            return (True, self.yaml_dict)

        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)

            elif index is not None:
                ind = index

            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)

            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)

            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)

        return (False, self.yaml_dict)

    def put(self, path, value):
        ''' put path, value into a dict.

        Works on a copy and only commits it on success, so a failed
        add_entry leaves yaml_dict untouched. Returns (changed, yaml_dict).'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)

        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass

        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if result is None:
            return (False, self.yaml_dict)

        # When path equals "" it is a special case.
        # "" refers to the root of the document
        # Only update the root path (entire document) when its a list or dict
        if path == '':
            if isinstance(result, list) or isinstance(result, dict):
                self.yaml_dict = result
                return (True, self.yaml_dict)

            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)

    def create(self, path, value):
        ''' create a yaml file.

        Only acts when the target file does not already exist.
        Returns (changed, yaml_dict).'''
        if not self.file_exists():
            # deepcopy didn't work
            # Try to use ruamel.yaml and fallback to pyyaml
            try:
                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                          default_flow_style=False),
                                     yaml.RoundTripLoader)
            except AttributeError:
                tmp_copy = copy.deepcopy(self.yaml_dict)

            # set the format attributes if available
            try:
                tmp_copy.fa.set_block_style()
            except AttributeError:
                pass

            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result is not None:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    @staticmethod
    def get_curr_value(invalue, val_type):
        '''return the current value, parsed as yaml or json when requested'''
        if invalue is None:
            return None

        curr_value = invalue
        if val_type == 'yaml':
            curr_value = yaml.safe_load(str(invalue))
        elif val_type == 'json':
            curr_value = json.loads(invalue)

        return curr_value

    @staticmethod
    def parse_value(inc_value, vtype=''):
        '''determine value type passed.

        Coerces strings to bool/str per vtype hints; otherwise attempts a
        yaml parse so numbers/lists/dicts keep their natural type.'''
        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                      'on', 'On', 'ON', ]
        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                       'off', 'Off', 'OFF']

        # It came in as a string but you didn't specify value_type as string
        # we will convert to bool if it matches any of the above cases
        if isinstance(inc_value, str) and 'bool' in vtype:
            if inc_value not in true_bools and inc_value not in false_bools:
                raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
        elif isinstance(inc_value, bool) and 'str' in vtype:
            inc_value = str(inc_value)

        # There is a special case where '' will turn into None after yaml loading it so skip
        if isinstance(inc_value, str) and inc_value == '':
            pass
        # If vtype is not str then go ahead and attempt to yaml load it.
        elif isinstance(inc_value, str) and 'str' not in vtype:
            try:
                inc_value = yaml.safe_load(inc_value)
            except Exception:
                raise YeditException('Could not determine type of incoming value. ' +
                                     'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))

        return inc_value

    @staticmethod
    def process_edits(edits, yamlfile):
        '''run through a list of edits and process them one-by-one.

        Each edit dict carries key/value plus an optional action
        (update/append; default put). Returns {'changed': ..., 'results': ...}.'''
        results = []
        for edit in edits:
            value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
            if edit.get('action') == 'update':
                # pylint: disable=line-too-long
                curr_value = Yedit.get_curr_value(
                    Yedit.parse_value(edit.get('curr_value')),
                    edit.get('curr_value_format'))

                rval = yamlfile.update(edit['key'],
                                       value,
                                       edit.get('index'),
                                       curr_value)

            elif edit.get('action') == 'append':
                rval = yamlfile.append(edit['key'], value)

            else:
                rval = yamlfile.put(edit['key'], value)

            if rval[0]:
                results.append({'key': edit['key'], 'edit': rval[1]})

        return {'changed': len(results) > 0, 'results': results}

    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def run_ansible(params):
        '''perform the idempotent crud operations.

        Dispatches on params['state'] (list/absent/present) and returns an
        Ansible-style result dict.'''
        yamlfile = Yedit(filename=params['src'],
                         backup=params['backup'],
                         content_type=params['content_type'],
                         backup_ext=params['backup_ext'],
                         separator=params['separator'])

        state = params['state']

        if params['src']:
            rval = yamlfile.load()

            if yamlfile.yaml_dict is None and state != 'present':
                return {'failed': True,
                        'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                               'file exists, that it is has correct permissions, and is valid yaml.'}

        if state == 'list':
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                yamlfile.yaml_dict = content

            if params['key']:
                rval = yamlfile.get(params['key'])

            return {'changed': False, 'result': rval, 'state': state}

        elif state == 'absent':
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                yamlfile.yaml_dict = content

            if params['update']:
                rval = yamlfile.pop(params['key'], params['value'])
            else:
                rval = yamlfile.delete(params['key'], params['index'], params['value'])

            if rval[0] and params['src']:
                yamlfile.write()

            return {'changed': rval[0], 'result': rval[1], 'state': state}

        elif state == 'present':
            # check if content is different than what is in the file
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])

                # We had no edits to make and the contents are the same
                if yamlfile.yaml_dict == content and \
                   params['value'] is None:
                    return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}

                yamlfile.yaml_dict = content

            # If we were passed a key, value then
            # we enapsulate it in a list and process it
            # Key, Value passed to the module : Converted to Edits list #
            edits = []
            _edit = {}
            if params['value'] is not None:
                _edit['value'] = params['value']
                _edit['value_type'] = params['value_type']
                _edit['key'] = params['key']

                if params['update']:
                    _edit['action'] = 'update'
                    _edit['curr_value'] = params['curr_value']
                    _edit['curr_value_format'] = params['curr_value_format']
                    _edit['index'] = params['index']

                elif params['append']:
                    _edit['action'] = 'append'

                edits.append(_edit)

            elif params['edits'] is not None:
                edits = params['edits']

            if edits:
                results = Yedit.process_edits(edits, yamlfile)

                # if there were changes and a src provided to us we need to write
                if results['changed'] and params['src']:
                    yamlfile.write()

                return {'changed': results['changed'], 'result': results['results'], 'state': state}

            # no edits to make
            if params['src']:
                # pylint: disable=redefined-variable-type
                rval = yamlfile.write()
                return {'changed': rval[0],
                        'result': rval[1],
                        'state': state}

            # We were passed content but no src, key or value, or edits. Return contents in memory
            return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        # NOTE(review): "Unkown" typo is in the runtime message; left
        # unchanged here since this is a documentation-only pass.
        return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
    '''Raised when an ``oc`` command fails or is invoked incorrectly.'''
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    ''' Find and return oc binary file '''
    # https://github.com/openshift/openshift-ansible/issues/3410
    # /usr/local/bin and ~/bin can hold oc but be missing from $PATH
    # when running under ansible/sudo, so search them explicitly too.
    search_dirs = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS

    binary = 'oc'

    try:
        # Python 3: shutil.which honours an explicit search path.
        located = shutil.which(binary, path=os.pathsep.join(search_dirs))
        if located is not None:
            binary = located
    except AttributeError:
        # Python 2 fallback: naive scan — first existing candidate wins.
        for directory in search_dirs:
            candidate = os.path.join(directory, binary)
            if os.path.exists(candidate):
                binary = candidate
                break

    return binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI

        namespace: namespace commands run in; None / 'none' / 'empty' means unscoped
        kubeconfig: path to a kubeconfig file; a private temp copy is used
        verbose: echo each command (and its output) to stdout
        all_namespaces: pass --all-namespaces to every command
        '''
        self.namespace = namespace
        self.verbose = verbose
        # Work on a private copy so concurrent runs cannot clobber the admin kubeconfig.
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces
        self.oc_binary = locate_oc_binary()

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, edits=None, force=False, sep='.'):
        ''' replace the current object with the content

        Fetches the live object, applies either the `content` key/value map or
        the `edits` list via Yedit, and only calls `oc replace` when something
        actually changed.
        '''
        res = self._get(resource, rname)
        if not res['results']:
            return res

        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, res['results'][0], separator=sep)
        updated = False

        if content is not None:
            changes = []
            for key, value in content.items():
                changes.append(yed.put(key, value))

            # yed.put returns (changed, result); any change triggers a replace
            if any([change[0] for change in changes]):
                updated = True

        elif edits is not None:
            results = Yedit.process_edits(edits, yed)

            if results['changed']:
                updated = True

        if updated:
            yed.write()
            atexit.register(Utils.cleanup, [fname])

            return self._replace(fname, force)

        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        # We are removing the 'resourceVersion' to handle
        # a race condition when modifying oc objects
        yed = Yedit(fname)
        results = yed.delete('metadata.resourceVersion')
        if results[0]:
            yed.write()

        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self._create(fname)

    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])

    def _delete(self, resource, name=None, selector=None):
        '''call oc delete on a resource

        Raises OpenShiftCLIError when neither name nor selector is given.
        '''
        cmd = ['delete', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        else:
            raise OpenShiftCLIError('Either name or selector is required when calling delete.')

        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template

        template_name: the name of the template to process
        create: whether to send to oc create after processing
        params: the parameters for the template
        template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
            cmd.append('-p')
            cmd.extend(param_str)

        results = self.openshift_cmd(cmd, output=True, input_data=template_data)

        if results['returncode'] != 0 or not create:
            return results

        fname = Utils.create_tmpfile(template_name + '-')
        yed = Yedit(fname, results['results'])
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self.openshift_cmd(['create', '-f', fname])

    def _get(self, resource, name=None, selector=None, field_selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]

        if selector is not None:
            cmd.append('--selector={}'.format(selector))

        if field_selector is not None:
            cmd.append('--field-selector={}'.format(field_selector))

        # Name cannot be used with selector or field_selector.
        if selector is None and field_selector is None and name is not None:
            cmd.append(name)

        cmd.extend(['-o', 'json'])

        rval = self.openshift_cmd(cmd, output=True)

        # Ensure results are returned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]

        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        cmd.append('--schedulable={}'.format(schedulable))

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods

            node: the node in which to list pods
            selector: the label selector filter if provided
            pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        cmd.extend(['--list-pods', '-o', 'json'])

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if dry_run:
            cmd.append('--dry-run')

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        if grace_period:
            cmd.append('--grace-period={}'.format(int(grace_period)))

        if force:
            cmd.append('--force')

        cmd.append('--evacuate')

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']

        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)

        cmd.append(image)

        if url:
            cmd.append('--from={0}/{1}'.format(url, image))

        cmd.append('-n{0}'.format(self.namespace))

        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier. '''
        curr_env = os.environ.copy()
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)

        stdout, stderr = proc.communicate(input_data)

        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')

    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc

        Returns a dict with 'returncode', 'cmd', 'results', and, on failure
        or decode error, 'stdout'/'stderr'.
        '''
        cmds = [self.oc_binary]

        if oadm:
            cmds.append('adm')

        cmds.extend(cmd)

        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        # BUG FIX: the sentinel list previously only contained the typo
        # 'emtpy', so a namespace of 'empty' was not skipped as intended.
        # 'emtpy' is kept for backward compatibility with existing callers.
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty', 'emtpy']:  # noqa: E501
            cmds.extend(['-n', self.namespace])

        if self.verbose:
            print(' '.join(cmds))

        try:
            returncode, stdout, stderr = self._run(cmds, input_data)
        except OSError as ex:
            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)

        rval = {"returncode": returncode,
                "cmd": ' '.join(cmds)}

        if output_type == 'json':
            rval['results'] = {}
            if output and stdout:
                try:
                    rval['results'] = json.loads(stdout)
                except ValueError as verr:
                    # BUG FIX: record every decode failure. The old code only
                    # matched the Python 2 message ("No JSON object could be
                    # decoded"), silently swallowing errors on Python 3.
                    rval['err'] = verr.args
        elif output_type == 'raw':
            rval['results'] = stdout if output else ''

        if self.verbose:
            print("STDOUT: {0}".format(stdout))
            print("STDERR: {0}".format(stderr))

        if 'err' in rval or returncode != 0:
            rval.update({"stderr": stderr,
                         "stdout": stdout})

        return rval
class Utils(object):  # pragma: no cover
    ''' utilities for openshiftcli modules '''

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        with open(filename, 'w') as sfd:
            sfd.write(str(contents))

    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents

        rname: prefix for the temp file name
        data: the object to serialize
        ftype: 'yaml', 'json', or anything else (written verbatim)
        '''
        tmp = Utils.create_tmpfile(prefix=rname)

        if ftype == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))

        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            Utils._write(tmp, data)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp

    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        Utils._write(tmpfile, open(inc_file).read())

        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])

        return tmpfile

    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''
        # delete=False: we only want the unique name; the caller owns the file
        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name

    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False

        if Utils.find_result(results, _name):
            return True

        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name; return None when absent '''
        rval = None
        for result in results:
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break

        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()

        if sfile_type == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)

        return contents

    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output '''
        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']

        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    version_dict[term] = line.split()[-1]

        # horrible hack to get openshift version in Openshift 3.2
        # By default "oc version in 3.2 does not return an "openshift" version
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]

        return version_dict

    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings '''
        versions_dict = {}

        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]

            if version.startswith('v'):
                version = version[1:]  # Remove the 'v' prefix
                versions_dict[tech + '_numeric'] = version.split('+')[0]
                # "3.3.0.33" is what we have, we want "3.3"
                versions_dict[tech + '_short'] = "{}.{}".format(*version.split('.'))

        return versions_dict

    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed '''
        import rpm
        transaction_set = rpm.TransactionSet()
        rpmquery = transaction_set.dbMatch("name", "atomic-openshift")

        return rpmquery.count() > 0

    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.  '''

        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)

        for key, value in result_def.items():
            if key in skip:
                continue

            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False

                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False

                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False

                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            # BUG FIX: this diagnostic previously printed even
                            # when debug was False; guard it like the others.
                            if debug:
                                print('list compare returned false')
                            return False

                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False

            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False

                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False

                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False

            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False

        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic Config'''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' return config options '''
        return self._options

    def to_option_list(self, ascommalist=''):
        '''return all options as a string
           if ascommalist is set to the name of a key, and
           the value of that key is a dict, format the dict
           as a list of comma delimited key=value pairs'''
        return self.stringify(ascommalist)

    def stringify(self, ascommalist=''):
        ''' return the options hash as cli params in a string
            if ascommalist is set to the name of a key, and
            the value of that key is a dict, format the dict
            as a list of comma delimited key=value pairs '''
        params = []
        for option_key in sorted(self.config_options.keys()):
            entry = self.config_options[option_key]
            # Emit only included options; an int value of 0 still counts.
            emit = entry['include'] and (entry['value'] is not None or isinstance(entry['value'], int))
            if not emit:
                continue
            if option_key == ascommalist:
                formatted = ','.join('{}={}'.format(item_key, item_val)
                                     for item_key, item_val in sorted(entry['value'].items()))
            else:
                formatted = entry['value']
            params.append('--{}={}'.format(option_key.replace('_', '-'), formatted))
        return params
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/service.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class ServiceConfig(object):
    ''' Handle service options '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 sname,
                 namespace,
                 ports,
                 annotations=None,
                 selector=None,
                 labels=None,
                 cluster_ip=None,
                 portal_ip=None,
                 session_affinity=None,
                 service_type=None,
                 external_ips=None):
        ''' constructor for handling service options '''
        self.name = sname
        self.namespace = namespace
        self.ports = ports
        self.annotations = annotations
        self.selector = selector
        self.labels = labels
        self.cluster_ip = cluster_ip
        self.portal_ip = portal_ip
        self.session_affinity = session_affinity
        self.service_type = service_type
        self.external_ips = external_ips
        self.data = {}

        self.create_dict()

    def create_dict(self):
        ''' instantiates a service dict '''
        metadata = {'name': self.name, 'namespace': self.namespace}
        if self.labels:
            metadata['labels'] = dict(self.labels)
        if self.annotations:
            metadata['annotations'] = self.annotations

        spec = {'ports': self.ports if self.ports else [],
                'sessionAffinity': self.session_affinity or 'None'}
        if self.selector:
            spec['selector'] = self.selector
        if self.cluster_ip:
            spec['clusterIP'] = self.cluster_ip
        if self.portal_ip:
            spec['portalIP'] = self.portal_ip
        if self.service_type:
            spec['type'] = self.service_type
        if self.external_ips:
            spec['externalIPs'] = self.external_ips

        self.data = {'apiVersion': 'v1',
                     'kind': 'Service',
                     'metadata': metadata,
                     'spec': spec}
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
    ''' Class to model the oc service object '''
    port_path = "spec.ports"
    portal_ip = "spec.portalIP"
    cluster_ip = "spec.clusterIP"
    selector_path = 'spec.selector'
    kind = 'Service'
    external_ips = "spec.externalIPs"

    def __init__(self, content):
        '''Service constructor'''
        super(Service, self).__init__(content=content)

    def get_ports(self):
        ''' return the list of ports, or [] when none are defined '''
        return self.get(Service.port_path) or []

    def get_selector(self):
        ''' return the service selector, or {} when none is defined '''
        return self.get(Service.selector_path) or {}

    def add_ports(self, inc_ports):
        ''' append a port (or list of ports) to the ports list '''
        if not isinstance(inc_ports, list):
            inc_ports = [inc_ports]

        existing = self.get_ports()
        if existing:
            existing.extend(inc_ports)
        else:
            self.put(Service.port_path, inc_ports)

        return True

    def find_ports(self, inc_port):
        ''' return the first port whose 'port' matches inc_port, else None '''
        return next((port for port in self.get_ports()
                     if port['port'] == inc_port['port']), None)

    def delete_ports(self, inc_ports):
        ''' remove each matching port; return True when anything was removed
            (or when there were no ports to begin with) '''
        if not isinstance(inc_ports, list):
            inc_ports = [inc_ports]

        current = self.get(Service.port_path) or []
        if not current:
            return True

        removed = False
        for candidate in inc_ports:
            match = self.find_ports(candidate)
            if match:
                current.remove(match)
                removed = True

        return removed

    def add_cluster_ip(self, sip):
        '''set spec.clusterIP'''
        self.put(Service.cluster_ip, sip)

    def add_portal_ip(self, pip):
        '''set spec.portalIP'''
        self.put(Service.portal_ip, pip)

    def get_external_ips(self):
        ''' return the list of external IPs, or [] when none are defined '''
        return self.get(Service.external_ips) or []

    def add_external_ips(self, inc_external_ips):
        ''' append an external IP (or list of them) to the externalIPs list '''
        if not isinstance(inc_external_ips, list):
            inc_external_ips = [inc_external_ips]

        existing = self.get_external_ips()
        if existing:
            existing.extend(inc_external_ips)
        else:
            self.put(Service.external_ips, inc_external_ips)

        return True

    def find_external_ips(self, inc_external_ip):
        ''' return the stored external IP equal to inc_external_ip, else None '''
        for existing_ip in self.get_external_ips():
            if existing_ip == inc_external_ip:
                return existing_ip
        return None

    def delete_external_ips(self, inc_external_ips):
        ''' remove each matching external IP; return True when anything was
            removed (or when there were no external IPs to begin with) '''
        if not isinstance(inc_external_ips, list):
            inc_external_ips = [inc_external_ips]

        current = self.get(Service.external_ips) or []
        if not current:
            return True

        removed = False
        for candidate in inc_external_ips:
            match = self.find_external_ips(candidate)
            if match:
                current.remove(match)
                removed = True

        return removed
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
    ''' Class to model an openshift DeploymentConfig'''
    default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
metadata:
  name: default_dc
  namespace: default
spec:
  replicas: 0
  selector:
    default_dc: default_dc
  strategy:
    resources: {}
    rollingParams:
      intervalSeconds: 1
      maxSurge: 0
      maxUnavailable: 25%
      timeoutSeconds: 600
      updatePercent: -25
      updatePeriodSeconds: 1
    type: Rolling
  template:
    metadata:
    spec:
      containers:
      - env:
        - name: default
          value: default
        image: default
        imagePullPolicy: IfNotPresent
        name: default_dc
        ports:
        - containerPort: 8000
          hostPort: 8000
          protocol: TCP
          name: default_port
        resources: {}
        terminationMessagePath: /dev/termination-log
      dnsPolicy: ClusterFirst
      hostNetwork: true
      nodeSelector:
        type: compute
      restartPolicy: Always
      securityContext: {}
      serviceAccount: default
      serviceAccountName: default
      terminationGracePeriodSeconds: 30
  triggers:
  - type: ConfigChange
'''

    replicas_path = "spec.replicas"
    env_path = "spec.template.spec.containers[0].env"
    volumes_path = "spec.template.spec.volumes"
    container_path = "spec.template.spec.containers"
    volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"

    def __init__(self, content=None):
        ''' Constructor for deploymentconfig '''
        if not content:
            content = DeploymentConfig.default_deployment_config

        super(DeploymentConfig, self).__init__(content=content)

    def add_env_value(self, key, value):
        ''' add key, value pair to env array '''
        rval = False
        env = self.get_env_vars()
        if env:
            env.append({'name': key, 'value': value})
            rval = True
        else:
            result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
            rval = result[0]

        return rval

    def exists_env_value(self, key, value):
        ''' return whether a key, value pair exists '''
        results = self.get_env_vars()
        if not results:
            return False

        for result in results:
            if result['name'] == key and result['value'] == value:
                return True

        return False

    def exists_env_key(self, key):
        ''' return whether a key exists in the env array '''
        results = self.get_env_vars()
        if not results:
            return False

        for result in results:
            if result['name'] == key:
                return True

        return False

    def get_env_var(self, key):
        '''return the environment variable named key, or None '''
        results = self.get(DeploymentConfig.env_path) or []
        if not results:
            return None

        for env_var in results:
            if env_var['name'] == key:
                return env_var

        return None

    def get_env_vars(self):
        '''return the environment variables array '''
        return self.get(DeploymentConfig.env_path) or []

    def delete_env_var(self, keys):
        '''delete a list of keys; return True when anything was removed '''
        if not isinstance(keys, list):
            keys = [keys]

        env_vars_array = self.get_env_vars()
        modified = False
        for key in keys:
            # BUG FIX: reset idx for every key (it previously leaked across
            # iterations, deleting at a stale index) and compare against None
            # so index 0 — the first env var — can be deleted too.
            idx = None
            for env_idx, env_var in enumerate(env_vars_array):
                if env_var['name'] == key:
                    idx = env_idx
                    break

            if idx is not None:
                modified = True
                del env_vars_array[idx]

        return modified

    def update_env_var(self, key, value):
        '''place an env in the env var list'''

        env_vars_array = self.get_env_vars()
        idx = None
        for env_idx, env_var in enumerate(env_vars_array):
            if env_var['name'] == key:
                idx = env_idx
                break

        # BUG FIX: 'is not None' so the first env var (index 0) is updated
        # in place instead of being duplicated.
        if idx is not None:
            env_vars_array[idx]['value'] = value
        else:
            self.add_env_value(key, value)

        return True

    def exists_volume_mount(self, volume_mount):
        ''' return whether a volume mount exists '''
        exist_volume_mounts = self.get_volume_mounts()

        if not exist_volume_mounts:
            return False

        volume_mount_found = False
        for exist_volume_mount in exist_volume_mounts:
            if exist_volume_mount['name'] == volume_mount['name']:
                volume_mount_found = True
                break

        return volume_mount_found

    def exists_volume(self, volume):
        ''' return whether a volume exists '''
        exist_volumes = self.get_volumes()

        volume_found = False
        for exist_volume in exist_volumes:
            if exist_volume['name'] == volume['name']:
                volume_found = True
                break

        return volume_found

    def find_volume_by_name(self, volume, mounts=False):
        ''' return the volume (or volume mount when mounts=True) matching
            volume['name'], else None '''
        volumes = []
        if mounts:
            volumes = self.get_volume_mounts()
        else:
            volumes = self.get_volumes()

        for exist_volume in volumes:
            if exist_volume['name'] == volume['name']:
                return exist_volume

        return None

    def get_replicas(self):
        ''' return replicas setting '''
        return self.get(DeploymentConfig.replicas_path)

    def get_volume_mounts(self):
        '''return volume mount information '''
        return self.get_volumes(mounts=True)

    def get_volumes(self, mounts=False):
        '''return volume (or volume mount) information '''
        if mounts:
            return self.get(DeploymentConfig.volume_mounts_path) or []

        return self.get(DeploymentConfig.volumes_path) or []

    def delete_volume_by_name(self, volume):
        '''delete a volume and its matching volume mount '''
        modified = False
        exist_volume_mounts = self.get_volume_mounts()
        exist_volumes = self.get_volumes()
        del_idx = None
        for idx, exist_volume in enumerate(exist_volumes):
            if 'name' in exist_volume and exist_volume['name'] == volume['name']:
                del_idx = idx
                break

        if del_idx is not None:
            del exist_volumes[del_idx]
            modified = True

        del_idx = None
        for idx, exist_volume_mount in enumerate(exist_volume_mounts):
            if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
                del_idx = idx
                break

        if del_idx is not None:
            # BUG FIX: delete at del_idx (the matched index), not at the loop
            # variable idx, which pointed at the last enumerated element.
            del exist_volume_mounts[del_idx]
            modified = True

        return modified

    def add_volume_mount(self, volume_mount):
        ''' add a volume mount to the proper location '''
        exist_volume_mounts = self.get_volume_mounts()

        if not exist_volume_mounts and volume_mount:
            self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
        else:
            exist_volume_mounts.append(volume_mount)

    def add_volume(self, volume):
        ''' add a volume to the proper location '''
        exist_volumes = self.get_volumes()
        if not volume:
            return

        if not exist_volumes:
            self.put(DeploymentConfig.volumes_path, [volume])
        else:
            exist_volumes.append(volume)

    def update_replicas(self, replicas):
        ''' update replicas value '''
        self.put(DeploymentConfig.replicas_path, replicas)

    def update_volume(self, volume):
        '''replace the volume matching volume['name'], or add it'''
        exist_volumes = self.get_volumes()

        if not volume:
            return False

        # update the volume
        update_idx = None
        for idx, exist_vol in enumerate(exist_volumes):
            if exist_vol['name'] == volume['name']:
                update_idx = idx
                break

        if update_idx is not None:
            exist_volumes[update_idx] = volume
        else:
            self.add_volume(volume)

        return True

    def update_volume_mount(self, volume_mount):
        '''update the mountPath of the matching volume mount, or add it'''
        modified = False

        exist_volume_mounts = self.get_volume_mounts()

        if not volume_mount:
            return False

        # update the volume mount
        for exist_vol_mount in exist_volume_mounts:
            if exist_vol_mount['name'] == volume_mount['name']:
                if 'mountPath' in exist_vol_mount and \
                        str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
                    exist_vol_mount['mountPath'] = volume_mount['mountPath']
                    modified = True
                break

        if not modified:
            self.add_volume_mount(volume_mount)
            modified = True

        return modified

    def needs_update_volume(self, volume, volume_mount):
        ''' verify a volume update is needed '''
        exist_volume = self.find_volume_by_name(volume)
        exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
        results = []
        results.append(exist_volume['name'] == volume['name'])

        if 'secret' in volume:
            results.append('secret' in exist_volume)
            results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
            results.append(exist_volume_mount['name'] == volume_mount['name'])
            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])

        elif 'emptyDir' in volume:
            results.append(exist_volume_mount['name'] == volume['name'])
            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])

        elif 'persistentVolumeClaim' in volume:
            pvc = 'persistentVolumeClaim'
            results.append(pvc in exist_volume)
            if results[-1]:
                results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])

                if 'claimSize' in volume[pvc]:
                    results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])

        elif 'hostpath' in volume:
            results.append('hostPath' in exist_volume)
            results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])

        return not all(results)

    def needs_update_replicas(self, replicas):
        ''' verify whether a replica update is needed '''
        current_reps = self.get(DeploymentConfig.replicas_path)
        return not current_reps == replicas
# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/serviceaccount.py -*- -*- -*-
class ServiceAccountConfig(object):
    '''Service account config class

    This class stores the options and returns a default service account
    '''

    # pylint: disable=too-many-arguments
    def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
        self.name = sname
        self.kubeconfig = kubeconfig
        self.namespace = namespace
        self.secrets = secrets or []
        self.image_pull_secrets = image_pull_secrets or []
        self.data = {}
        self.create_dict()

    def create_dict(self):
        ''' instantiate a properly structured service account dict '''
        self.data = {
            'apiVersion': 'v1',
            'kind': 'ServiceAccount',
            'metadata': {'name': self.name, 'namespace': self.namespace},
            'secrets': [{'name': secret} for secret in self.secrets],
            'imagePullSecrets': [{'name': secret} for secret in self.image_pull_secrets],
        }
class ServiceAccount(Yedit):
    ''' Class to wrap the oc command line tools '''
    image_pull_secrets_path = "imagePullSecrets"
    secrets_path = "secrets"

    def __init__(self, content):
        '''ServiceAccount constructor'''
        super(ServiceAccount, self).__init__(content=content)
        self._secrets = None
        self._image_pull_secrets = None

    @property
    def image_pull_secrets(self):
        ''' property for image_pull_secrets (lazily loaded from content) '''
        if self._image_pull_secrets is None:
            self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
        return self._image_pull_secrets

    @image_pull_secrets.setter
    def image_pull_secrets(self, secrets):
        ''' setter for image_pull_secrets '''
        self._image_pull_secrets = secrets

    @property
    def secrets(self):
        ''' property for secrets (lazily loaded from content) '''
        if not self._secrets:
            self._secrets = self.get(ServiceAccount.secrets_path) or []
        return self._secrets

    @secrets.setter
    def secrets(self, secrets):
        ''' setter for secrets '''
        self._secrets = secrets

    def delete_secret(self, inc_secret):
        ''' remove a secret; return True when it was found and removed '''
        remove_idx = None
        for idx, sec in enumerate(self.secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break

        # BUG FIX: compare against None. A bare truth test treated index 0 as
        # "not found", making the first secret impossible to delete.
        if remove_idx is not None:
            del self.secrets[remove_idx]
            return True

        return False

    def delete_image_pull_secret(self, inc_secret):
        ''' remove an image_pull_secret; return True when it was removed '''
        remove_idx = None
        for idx, sec in enumerate(self.image_pull_secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break

        # BUG FIX: same index-0 fix as delete_secret.
        if remove_idx is not None:
            del self.image_pull_secrets[remove_idx]
            return True

        return False

    def find_secret(self, inc_secret):
        '''find a secret by name; return it or None'''
        for secret in self.secrets:
            if secret['name'] == inc_secret:
                return secret

        return None

    def find_image_pull_secret(self, inc_secret):
        '''find an image pull secret by name; return it or None'''
        for secret in self.image_pull_secrets:
            if secret['name'] == inc_secret:
                return secret

        return None

    def add_secret(self, inc_secret):
        '''add secret'''
        if self.secrets:
            self.secrets.append({"name": inc_secret})  # pylint: disable=no-member
        else:
            self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])

    def add_image_pull_secret(self, inc_secret):
        '''add image_pull_secret'''
        if self.image_pull_secrets:
            self.image_pull_secrets.append({"name": inc_secret})  # pylint: disable=no-member
        else:
            self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
# -*- -*- -*- End included fragment: lib/serviceaccount.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class SecretConfig(object):
    ''' Handle secret options '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 sname,
                 namespace,
                 kubeconfig,
                 secrets=None,
                 stype=None,
                 annotations=None):
        ''' constructor for handling secret options '''
        self.kubeconfig = kubeconfig
        self.name = sname
        self.type = stype
        self.namespace = namespace
        self.secrets = secrets
        self.annotations = annotations
        self.data = {}

        self.create_dict()

    def create_dict(self):
        ''' assign the correct properties for a secret dict '''
        metadata = {'name': self.name, 'namespace': self.namespace}
        if self.annotations:
            metadata['annotations'] = self.annotations

        self.data = {
            'apiVersion': 'v1',
            'kind': 'Secret',
            'type': self.type,
            'metadata': metadata,
            'data': dict(self.secrets) if self.secrets else {},
        }
# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
    ''' Class to wrap the oc command line tools '''
    secret_path = "data"
    kind = 'secret'

    def __init__(self, content):
        '''secret constructor'''
        super(Secret, self).__init__(content=content)
        self._secrets = None

    @property
    def secrets(self):
        '''secret property getter (lazily loaded from content)'''
        if self._secrets is None:
            self._secrets = self.get_secrets()
        return self._secrets

    @secrets.setter
    def secrets(self, value):
        '''secret property setter

        BUG FIX: the previous setter took no value parameter and contained
        copy-pasted getter logic, so any assignment to .secrets raised
        TypeError. Accept and store the assigned value.
        '''
        self._secrets = value

    def get_secrets(self):
        ''' returns all of the defined secrets '''
        return self.get(Secret.secret_path) or {}

    def add_secret(self, key, value):
        ''' add a secret '''
        if self.secrets:
            self.secrets[key] = value
        else:
            self.put(Secret.secret_path, {key: value})

        return True

    def delete_secret(self, key):
        ''' delete secret; return False when the key is absent '''
        try:
            del self.secrets[key]
        except KeyError:
            return False

        return True

    def find_secret(self, key):
        ''' find secret; return {'key': ..., 'value': ...} or None '''
        try:
            rval = self.secrets[key]
        except KeyError:
            return None

        return {'key': key, 'value': rval}

    def update_secret(self, key, value):
        ''' update a secret, adding it when absent '''
        if key in self.secrets:
            self.secrets[key] = value
        else:
            self.add_secret(key, value)

        return True
# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/rolebinding.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class RoleBindingConfig(object):
    ''' Container for the options used to build a RoleBinding resource.

    Builds ``self.data`` (a dict mirroring the v1 RoleBinding API shape)
    from the constructor arguments.
    '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 name,
                 namespace,
                 kubeconfig,
                 group_names=None,
                 role_ref=None,
                 subjects=None,
                 usernames=None):
        ''' constructor for handling rolebinding options

        name/namespace identify the binding; kubeconfig is the path used by
        the oc client; group_names, role_ref, subjects and usernames are the
        optional RoleBinding fields (lists/dict per the v1 API).
        '''
        self.kubeconfig = kubeconfig
        self.name = name
        self.namespace = namespace
        self.group_names = group_names
        self.role_ref = role_ref
        self.subjects = subjects
        self.usernames = usernames
        self.data = {}
        self.create_dict()

    def create_dict(self):
        ''' create a default rolebinding as a dict '''
        self.data['apiVersion'] = 'v1'
        self.data['kind'] = 'RoleBinding'
        self.data['groupNames'] = self.group_names
        # Bug fix: 'metadata' was assigned into before it existed, so every
        # construction raised KeyError. Initialize it first, mirroring
        # SecretConfig.create_dict in this module.
        self.data['metadata'] = {}
        self.data['metadata']['name'] = self.name
        self.data['metadata']['namespace'] = self.namespace
        self.data['roleRef'] = self.role_ref
        self.data['subjects'] = self.subjects
        self.data['userNames'] = self.usernames
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class RoleBinding(Yedit):
    ''' Class to model a rolebinding openshift object'''
    # Yedit lookup paths for the RoleBinding top-level keys.
    group_names_path = "groupNames"
    role_ref_path = "roleRef"
    subjects_path = "subjects"
    user_names_path = "userNames"
    kind = 'RoleBinding'
    def __init__(self, content):
        '''RoleBinding constructor'''
        super(RoleBinding, self).__init__(content=content)
        # Lazily populated caches; see the matching properties below.
        self._subjects = None
        self._role_ref = None
        self._group_names = None
        self._user_names = None
    @property
    def subjects(self):
        ''' subjects property; lazily loaded from the resource content '''
        if self._subjects is None:
            self._subjects = self.get_subjects()
        return self._subjects
    @subjects.setter
    def subjects(self, data):
        ''' subjects property setter'''
        self._subjects = data
    @property
    def role_ref(self):
        ''' role_ref property; lazily loaded from the resource content '''
        if self._role_ref is None:
            self._role_ref = self.get_role_ref()
        return self._role_ref
    @role_ref.setter
    def role_ref(self, data):
        ''' role_ref property setter'''
        self._role_ref = data
    @property
    def group_names(self):
        ''' group_names property; lazily loaded from the resource content '''
        if self._group_names is None:
            self._group_names = self.get_group_names()
        return self._group_names
    @group_names.setter
    def group_names(self, data):
        ''' group_names property setter'''
        self._group_names = data
    @property
    def user_names(self):
        ''' user_names property; lazily loaded from the resource content '''
        if self._user_names is None:
            self._user_names = self.get_user_names()
        return self._user_names
    @user_names.setter
    def user_names(self, data):
        ''' user_names property setter'''
        self._user_names = data
    def get_group_names(self):
        ''' return groupNames (empty list when absent) '''
        return self.get(RoleBinding.group_names_path) or []
    def get_user_names(self):
        ''' return usernames (empty list when absent) '''
        return self.get(RoleBinding.user_names_path) or []
    def get_role_ref(self):
        ''' return role_ref (empty dict when absent) '''
        return self.get(RoleBinding.role_ref_path) or {}
    def get_subjects(self):
        ''' return subjects (empty list when absent) '''
        return self.get(RoleBinding.subjects_path) or []
    #### ADD #####
    def add_subject(self, inc_subject):
        ''' add a subject; always returns True '''
        if self.subjects:
            # pylint: disable=no-member
            # NOTE(review): appends to the cached list; not written back via
            # Yedit.put -- confirm callers serialize through yaml_dict.
            self.subjects.append(inc_subject)
        else:
            self.put(RoleBinding.subjects_path, [inc_subject])
        return True
    def add_role_ref(self, inc_role_ref):
        ''' add a role_ref; only succeeds when none is set yet '''
        if not self.role_ref:
            self.put(RoleBinding.role_ref_path, {"name": inc_role_ref})
            return True
        return False
    def add_group_names(self, inc_group_names):
        ''' add a group_names; always returns True '''
        if self.group_names:
            # pylint: disable=no-member
            self.group_names.append(inc_group_names)
        else:
            self.put(RoleBinding.group_names_path, [inc_group_names])
        return True
    def add_user_name(self, inc_user_name):
        ''' add a username; always returns True '''
        if self.user_names:
            # pylint: disable=no-member
            self.user_names.append(inc_user_name)
        else:
            self.put(RoleBinding.user_names_path, [inc_user_name])
        return True
    #### /ADD #####
    #### Remove #####
    def remove_subject(self, inc_subject):
        ''' remove a subject; returns False when it was not present '''
        try:
            # pylint: disable=no-member
            self.subjects.remove(inc_subject)
        except ValueError as _:
            return False
        return True
    def remove_role_ref(self, inc_role_ref):
        ''' remove a role_ref; only removes the 'name' key when it matches '''
        if self.role_ref and self.role_ref['name'] == inc_role_ref:
            del self.role_ref['name']
            return True
        return False
    def remove_group_name(self, inc_group_name):
        ''' remove a groupname; returns False when it was not present '''
        try:
            # pylint: disable=no-member
            self.group_names.remove(inc_group_name)
        except ValueError as _:
            return False
        return True
    def remove_user_name(self, inc_user_name):
        ''' remove a username; returns False when it was not present '''
        try:
            # pylint: disable=no-member
            self.user_names.remove(inc_user_name)
        except ValueError as _:
            return False
        return True
    #### /REMOVE #####
    #### UPDATE #####
    def update_subject(self, inc_subject):
        ''' update a subject, adding it when missing '''
        try:
            # pylint: disable=no-member
            index = self.subjects.index(inc_subject)
        except ValueError as _:
            return self.add_subject(inc_subject)
        # NOTE(review): replacing an element with itself -- the lookup found
        # an equal entry, so this is effectively a no-op in-place update.
        self.subjects[index] = inc_subject
        return True
    def update_group_name(self, inc_group_name):
        ''' update a groupname, adding it when missing '''
        try:
            # pylint: disable=no-member
            index = self.group_names.index(inc_group_name)
        except ValueError as _:
            return self.add_group_names(inc_group_name)
        self.group_names[index] = inc_group_name
        return True
    def update_user_name(self, inc_user_name):
        ''' update a username, adding it when missing '''
        try:
            # pylint: disable=no-member
            index = self.user_names.index(inc_user_name)
        except ValueError as _:
            return self.add_user_name(inc_user_name)
        self.user_names[index] = inc_user_name
        return True
    def update_role_ref(self, inc_role_ref):
        ''' update a role_ref; overwrites the 'name' key unconditionally '''
        self.role_ref['name'] = inc_role_ref
        return True
    #### /UPDATE #####
    #### FIND ####
    def find_subject(self, inc_subject):
        ''' find a subject; returns its index or None when absent '''
        index = None
        try:
            # pylint: disable=no-member
            index = self.subjects.index(inc_subject)
        except ValueError as _:
            return index
        return index
    def find_group_name(self, inc_group_name):
        ''' find a group_name; returns its index or None when absent '''
        index = None
        try:
            # pylint: disable=no-member
            index = self.group_names.index(inc_group_name)
        except ValueError as _:
            return index
        return index
    def find_user_name(self, inc_user_name):
        ''' find a user_name; returns its index or None when absent '''
        index = None
        try:
            # pylint: disable=no-member
            index = self.user_names.index(inc_user_name)
        except ValueError as _:
            return index
        return index
    def find_role_ref(self, inc_role_ref):
        ''' find a role_ref; returns it when the names match, else None '''
        if self.role_ref and self.role_ref['name'] == inc_role_ref['name']:
            return self.role_ref
        return None
# -*- -*- -*- End included fragment: lib/rolebinding.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_adm_router.py -*- -*- -*-
class RouterException(Exception):
    '''Raised when router preparation or manipulation fails.'''
class RouterConfig(OpenShiftCLIConfig):
    ''' RouterConfig is a DTO for the router. '''
    def __init__(self, rname, namespace, kubeconfig, router_options):
        # Delegates all option storage/serialization to OpenShiftCLIConfig;
        # router_options is a dict of {'value': ..., 'include': ...} entries
        # (see Router.run_ansible for the full set of keys).
        super(RouterConfig, self).__init__(rname, namespace, kubeconfig, router_options)
class Router(OpenShiftCLI):
    ''' Class to wrap the oc command line tools '''
    def __init__(self,
                 router_config,
                 verbose=False):
        ''' Constructor for OpenshiftOC

        a router consists of 3 or more parts
        - dc/router
        - svc/router
        - sa/router
        - secret/router-certs
        - clusterrolebinding/router-router-role
        '''
        super(Router, self).__init__('default', router_config.kubeconfig, verbose)
        self.config = router_config
        self.verbose = verbose
        # The openshift objects that together make up a deployed router.
        self.router_parts = [{'kind': 'dc', 'name': self.config.name},
                             {'kind': 'svc', 'name': self.config.name},
                             {'kind': 'sa', 'name': self.config.config_options['service_account']['value']},
                             {'kind': 'secret', 'name': self.config.name + '-certs'},
                             {'kind': 'clusterrolebinding', 'name': 'router-' + self.config.name + '-role'},
                            ]
        # Cache for the dry-run generated objects (see prepared_router).
        self.__prepared_router = None
        # Current cluster state; populated by get().
        self.dconfig = None
        self.svc = None
        self._secret = None
        self._serviceaccount = None
        self._rolebinding = None
    @property
    def prepared_router(self):
        ''' property for the prepared router

        Lazily runs the dry-run preparation (_prepare_router) and caches the
        result. Raises RouterException when preparation fails.
        '''
        if self.__prepared_router is None:
            results = self._prepare_router()
            # NOTE(review): `in`/`and` bind tighter than `or`, so this reads
            # "no results at all, or a nonzero returncode".
            if not results or 'returncode' in results and results['returncode'] != 0:
                if 'stderr' in results:
                    raise RouterException('Could not perform router preparation: %s' % results['stderr'])
                raise RouterException('Could not perform router preparation.')
            self.__prepared_router = results
        return self.__prepared_router
    @prepared_router.setter
    def prepared_router(self, obj):
        '''setter for the prepared_router'''
        self.__prepared_router = obj
    @property
    def deploymentconfig(self):
        ''' property deploymentconfig'''
        return self.dconfig
    @deploymentconfig.setter
    def deploymentconfig(self, config):
        ''' setter for property deploymentconfig '''
        self.dconfig = config
    @property
    def service(self):
        ''' property for service '''
        return self.svc
    @service.setter
    def service(self, config):
        ''' setter for property service '''
        self.svc = config
    @property
    def secret(self):
        ''' property secret '''
        return self._secret
    @secret.setter
    def secret(self, config):
        ''' setter for property secret '''
        self._secret = config
    @property
    def serviceaccount(self):
        ''' property for serviceaccount '''
        return self._serviceaccount
    @serviceaccount.setter
    def serviceaccount(self, config):
        ''' setter for property serviceaccount '''
        self._serviceaccount = config
    @property
    def rolebinding(self):
        ''' property rolebinding '''
        return self._rolebinding
    @rolebinding.setter
    def rolebinding(self, config):
        ''' setter for property rolebinding '''
        self._rolebinding = config
    def get_object_by_kind(self, kind):
        '''return the current object kind by name

        Accepts short or long form (dc/deploymentconfig, svc/service,
        sa/serviceaccount, secret, clusterrolebinding), case-insensitively;
        returns None for anything else.
        '''
        if re.match("^(dc|deploymentconfig)$", kind, flags=re.IGNORECASE):
            return self.deploymentconfig
        elif re.match("^(svc|service)$", kind, flags=re.IGNORECASE):
            return self.service
        elif re.match("^(sa|serviceaccount)$", kind, flags=re.IGNORECASE):
            return self.serviceaccount
        elif re.match("secret", kind, flags=re.IGNORECASE):
            return self.secret
        elif re.match("clusterrolebinding", kind, flags=re.IGNORECASE):
            return self.rolebinding
        return None
    def get(self):
        ''' return the self.router_parts

        Fetches each router part from the cluster and caches it on the
        corresponding attribute; parts that are missing stay None.
        '''
        self.service = None
        self.deploymentconfig = None
        self.serviceaccount = None
        self.secret = None
        self.rolebinding = None
        for part in self.router_parts:
            result = self._get(part['kind'], name=part['name'])
            if result['returncode'] == 0 and part['kind'] == 'dc':
                self.deploymentconfig = DeploymentConfig(result['results'][0])
            elif result['returncode'] == 0 and part['kind'] == 'svc':
                self.service = Service(content=result['results'][0])
            elif result['returncode'] == 0 and part['kind'] == 'sa':
                self.serviceaccount = ServiceAccount(content=result['results'][0])
            elif result['returncode'] == 0 and part['kind'] == 'secret':
                self.secret = Secret(content=result['results'][0])
            elif result['returncode'] == 0 and part['kind'] == 'clusterrolebinding':
                self.rolebinding = RoleBinding(content=result['results'][0])
        return {'deploymentconfig': self.deploymentconfig,
                'service': self.service,
                'serviceaccount': self.serviceaccount,
                'secret': self.secret,
                'clusterrolebinding': self.rolebinding,
               }
    def exists(self):
        '''return a whether svc or dc exists '''
        # NOTE(review): despite the docstring, all four of dc, svc, secret
        # and serviceaccount must be present (rolebinding is not checked).
        if self.deploymentconfig and self.service and self.secret and self.serviceaccount:
            return True
        return False
    def delete(self):
        '''return all pods '''
        # Deletes every router part; tolerates 'already exist' stderr text,
        # otherwise reports the last nonzero returncode.
        parts = []
        for part in self.router_parts:
            parts.append(self._delete(part['kind'], part['name']))
        rval = 0
        for part in parts:
            if part['returncode'] != 0 and not 'already exist' in part['stderr']:
                rval = part['returncode']
        return {'returncode': rval, 'results': parts}
    def add_modifications(self, deploymentconfig):
        '''modify the deployment config'''
        # We want modifications in the form of edits coming in from the module.
        # Let's apply these here
        edit_results = []
        for edit in self.config.config_options['edits'].get('value', []):
            if edit['action'] == 'put':
                edit_results.append(deploymentconfig.put(edit['key'],
                                                         edit['value']))
            if edit['action'] == 'update':
                edit_results.append(deploymentconfig.update(edit['key'],
                                                            edit['value'],
                                                            edit.get('index', None),
                                                            edit.get('curr_value', None)))
            if edit['action'] == 'append':
                edit_results.append(deploymentconfig.append(edit['key'],
                                                            edit['value']))
        # Returns None when edits were requested but none of them succeeded.
        if edit_results and not any([res[0] for res in edit_results]):
            return None
        return deploymentconfig
    # pylint: disable=too-many-branches
    def _prepare_router(self):
        '''prepare router for instantiation

        Runs `oc adm router ... --dry-run -o json`, wraps each generated
        object, applies module edits to the DeploymentConfig, and writes each
        object to a temp file. Returns the oc_objects mapping, or the raw
        command results on failure.
        '''
        # if cacert, key, and cert were passed, combine them into a pem file
        if (self.config.config_options['cacert_file']['value'] and
                self.config.config_options['cert_file']['value'] and
                self.config.config_options['key_file']['value']):
            router_pem = '/tmp/router.pem'
            with open(router_pem, 'w') as rfd:
                rfd.write(open(self.config.config_options['cert_file']['value']).read())
                rfd.write(open(self.config.config_options['key_file']['value']).read())
                if self.config.config_options['cacert_file']['value'] and \
                        os.path.exists(self.config.config_options['cacert_file']['value']):
                    rfd.write(open(self.config.config_options['cacert_file']['value']).read())
            # clean up the combined pem when the interpreter exits
            atexit.register(Utils.cleanup, [router_pem])
            self.config.config_options['default_cert']['value'] = router_pem
        elif self.config.config_options['default_cert']['value'] is None:
            # No certificate was passed to us. do not pass one to oc adm router
            self.config.config_options['default_cert']['include'] = False
        options = self.config.to_option_list(ascommalist='labels')
        cmd = ['router', self.config.name]
        cmd.extend(options)
        cmd.extend(['--dry-run=True', '-o', 'json'])
        results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
        # pylint: disable=maybe-no-member
        if results['returncode'] != 0 or 'items' not in results['results']:
            return results
        oc_objects = {'DeploymentConfig': {'obj': None, 'path': None, 'update': False},
                      'Secret': {'obj': None, 'path': None, 'update': False},
                      'ServiceAccount': {'obj': None, 'path': None, 'update': False},
                      'ClusterRoleBinding': {'obj': None, 'path': None, 'update': False},
                      'Service': {'obj': None, 'path': None, 'update': False},
                     }
        # pylint: disable=invalid-sequence-index
        for res in results['results']['items']:
            if res['kind'] == 'DeploymentConfig':
                oc_objects['DeploymentConfig']['obj'] = DeploymentConfig(res)
            elif res['kind'] == 'Service':
                oc_objects['Service']['obj'] = Service(res)
            elif res['kind'] == 'ServiceAccount':
                oc_objects['ServiceAccount']['obj'] = ServiceAccount(res)
            elif res['kind'] == 'Secret':
                oc_objects['Secret']['obj'] = Secret(res)
            elif res['kind'] == 'ClusterRoleBinding':
                oc_objects['ClusterRoleBinding']['obj'] = RoleBinding(res)
        # Currently only deploymentconfig needs updating
        # Verify we got a deploymentconfig
        if not oc_objects['DeploymentConfig']['obj']:
            return results
        # add modifications added
        oc_objects['DeploymentConfig']['obj'] = self.add_modifications(oc_objects['DeploymentConfig']['obj'])
        for oc_type, oc_data in oc_objects.items():
            if oc_data['obj'] is not None:
                oc_data['path'] = Utils.create_tmp_file_from_contents(oc_type, oc_data['obj'].yaml_dict)
        return oc_objects
    def create(self):
        '''Create a router

        This includes the different parts:
        - deploymentconfig
        - service
        - serviceaccount
        - secrets
        - clusterrolebinding
        '''
        results = []
        # mark which prepared parts require a replace rather than a create
        self.needs_update()
        # pylint: disable=maybe-no-member
        for kind, oc_data in self.prepared_router.items():
            if oc_data['obj'] is not None:
                # brief pause between per-object oc invocations
                time.sleep(1)
                if self.get_object_by_kind(kind) is None:
                    results.append(self._create(oc_data['path']))
                elif oc_data['update']:
                    results.append(self._replace(oc_data['path']))
        rval = 0
        for result in results:
            if result['returncode'] != 0 and not 'already exist' in result['stderr']:
                rval = result['returncode']
        return {'returncode': rval, 'results': results}
    def update(self):
        '''run update for the router. This performs a replace'''
        results = []
        # pylint: disable=maybe-no-member
        for _, oc_data in self.prepared_router.items():
            if oc_data['update']:
                results.append(self._replace(oc_data['path']))
        rval = 0
        for result in results:
            if result['returncode'] != 0:
                rval = result['returncode']
        return {'returncode': rval, 'results': results}
    # pylint: disable=too-many-return-statements,too-many-branches
    def needs_update(self):
        ''' check to see if we need to update

        Compares each prepared (dry-run) object against the current cluster
        object, flags the differing ones via prepared_router[kind]['update'],
        and returns True when any part needs an update.
        '''
        # ServiceAccount:
        # Need to determine changes from the pregenerated ones from the original
        # Since these are auto generated, we can skip
        skip = ['secrets', 'imagePullSecrets']
        if self.serviceaccount is None or \
                not Utils.check_def_equal(self.prepared_router['ServiceAccount']['obj'].yaml_dict,
                                          self.serviceaccount.yaml_dict,
                                          skip_keys=skip,
                                          debug=self.verbose):
            self.prepared_router['ServiceAccount']['update'] = True
        # Secret:
        # See if one was generated from our dry-run and verify it if needed
        if self.prepared_router['Secret']['obj']:
            if not self.secret:
                self.prepared_router['Secret']['update'] = True
            # NOTE(review): the next check overlaps with the one above (a
            # missing secret sets 'update' twice); harmless but redundant.
            if self.secret is None or \
                    not Utils.check_def_equal(self.prepared_router['Secret']['obj'].yaml_dict,
                                              self.secret.yaml_dict,
                                              skip_keys=skip,
                                              debug=self.verbose):
                self.prepared_router['Secret']['update'] = True
        # Service:
        # Fix the ports to have protocol=TCP
        for port in self.prepared_router['Service']['obj'].get('spec.ports'):
            port['protocol'] = 'TCP'
        skip = ['portalIP', 'clusterIP', 'sessionAffinity', 'type']
        if self.service is None or \
                not Utils.check_def_equal(self.prepared_router['Service']['obj'].yaml_dict,
                                          self.service.yaml_dict,
                                          skip_keys=skip,
                                          debug=self.verbose):
            self.prepared_router['Service']['update'] = True
        # DeploymentConfig:
        # Router needs some exceptions.
        # We do not want to check the autogenerated password for stats admin
        if self.deploymentconfig is not None:
            if not self.config.config_options['stats_password']['value']:
                for idx, env_var in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
                        'spec.template.spec.containers[0].env') or []):
                    if env_var['name'] == 'STATS_PASSWORD':
                        env_var['value'] = \
                            self.deploymentconfig.get('spec.template.spec.containers[0].env[%s].value' % idx)
                        break
            # dry-run doesn't add the protocol to the ports section. We will manually do that.
            for idx, port in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
                    'spec.template.spec.containers[0].ports') or []):
                if not 'protocol' in port:
                    port['protocol'] = 'TCP'
        # These are different when generating
        skip = ['dnsPolicy',
                'terminationGracePeriodSeconds',
                'restartPolicy', 'timeoutSeconds',
                'livenessProbe', 'readinessProbe',
                'terminationMessagePath', 'hostPort',
                'defaultMode',
               ]
        if self.deploymentconfig is None or \
                not Utils.check_def_equal(self.prepared_router['DeploymentConfig']['obj'].yaml_dict,
                                          self.deploymentconfig.yaml_dict,
                                          skip_keys=skip,
                                          debug=self.verbose):
            self.prepared_router['DeploymentConfig']['update'] = True
        # Check if any of the parts need updating, if so, return True
        # else, no need to update
        # pylint: disable=no-member
        return any([self.prepared_router[oc_type]['update'] for oc_type in self.prepared_router.keys()])
    @staticmethod
    def run_ansible(params, check_mode):
        '''run ansible idempotent code

        params: the Ansible module parameters (see main() for the schema).
        check_mode: when True, report what would change without changing it.
        Returns an Ansible-style result dict ('changed'/'results'/'failed').
        '''
        rconfig = RouterConfig(params['name'],
                               params['namespace'],
                               params['kubeconfig'],
                               {'default_cert': {'value': params['default_cert'], 'include': True},
                                'cert_file': {'value': params['cert_file'], 'include': False},
                                'key_file': {'value': params['key_file'], 'include': False},
                                'images': {'value': params['images'], 'include': True},
                                'latest_images': {'value': params['latest_images'], 'include': True},
                                'labels': {'value': params['labels'], 'include': True},
                                'ports': {'value': ','.join(params['ports']), 'include': True},
                                'replicas': {'value': params['replicas'], 'include': True},
                                'selector': {'value': params['selector'], 'include': True},
                                'service_account': {'value': params['service_account'], 'include': True},
                                'router_type': {'value': params['router_type'], 'include': False},
                                'host_network': {'value': params['host_network'], 'include': True},
                                'external_host': {'value': params['external_host'], 'include': True},
                                'external_host_vserver': {'value': params['external_host_vserver'],
                                                          'include': True},
                                'external_host_insecure': {'value': params['external_host_insecure'],
                                                           'include': True},
                                'external_host_partition_path': {'value': params['external_host_partition_path'],
                                                                 'include': True},
                                'external_host_username': {'value': params['external_host_username'],
                                                           'include': True},
                                'external_host_password': {'value': params['external_host_password'],
                                                           'include': True},
                                'external_host_private_key': {'value': params['external_host_private_key'],
                                                              'include': True},
                                'stats_user': {'value': params['stats_user'], 'include': True},
                                'stats_password': {'value': params['stats_password'], 'include': True},
                                'stats_port': {'value': params['stats_port'], 'include': True},
                                # extra
                                'cacert_file': {'value': params['cacert_file'], 'include': False},
                                # edits
                                'edits': {'value': params['edits'], 'include': False},
                               })
        state = params['state']
        ocrouter = Router(rconfig, verbose=params['debug'])
        api_rval = ocrouter.get()
        ########
        # get
        ########
        if state == 'list':
            return {'changed': False, 'results': api_rval, 'state': state}
        ########
        # Delete
        ########
        if state == 'absent':
            if not ocrouter.exists():
                return {'changed': False, 'state': state}
            if check_mode:
                return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
            # In case of delete we return a list of each object
            # that represents a router and its result in a list
            # pylint: disable=redefined-variable-type
            api_rval = ocrouter.delete()
            return {'changed': True, 'results': api_rval, 'state': state}
        if state == 'present':
            ########
            # Create
            ########
            if not ocrouter.exists():
                if check_mode:
                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
                api_rval = ocrouter.create()
                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}
                return {'changed': True, 'results': api_rval, 'state': state}
            ########
            # Update
            ########
            if not ocrouter.needs_update():
                return {'changed': False, 'state': state}
            if check_mode:
                return {'changed': False, 'msg': 'CHECK_MODE: Would have performed an update.'}
            api_rval = ocrouter.update()
            if api_rval['returncode'] != 0:
                return {'failed': True, 'msg': api_rval}
            return {'changed': True, 'results': api_rval, 'state': state}
# -*- -*- -*- End included fragment: class/oc_adm_router.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_adm_router.py -*- -*- -*-
def main():
    '''Ansible entry point for the oc_adm_router module.'''
    # Argument schema; values mirror the `oc adm router` CLI options.
    argspec = {
        'state': {'default': 'present', 'type': 'str',
                  'choices': ['present', 'absent']},
        'debug': {'default': False, 'type': 'bool'},
        'namespace': {'default': 'default', 'type': 'str'},
        'name': {'default': 'router', 'type': 'str'},
        'kubeconfig': {'default': '/etc/origin/master/admin.kubeconfig', 'type': 'str'},
        'default_cert': {'default': None, 'type': 'str'},
        'cert_file': {'default': None, 'type': 'str'},
        'key_file': {'default': None, 'type': 'str'},
        'images': {'default': None, 'type': 'str'},  # 'openshift3/ose-${component}:${version}'
        'latest_images': {'default': False, 'type': 'bool'},
        'labels': {'default': None, 'type': 'dict'},
        'ports': {'default': ['80:80', '443:443'], 'type': 'list'},
        'replicas': {'default': 1, 'type': 'int'},
        'selector': {'default': None, 'type': 'str'},
        'service_account': {'default': 'router', 'type': 'str'},
        'router_type': {'default': 'haproxy-router', 'type': 'str'},
        'host_network': {'default': True, 'type': 'bool'},
        # external host options
        'external_host': {'default': None, 'type': 'str'},
        'external_host_vserver': {'default': None, 'type': 'str'},
        'external_host_insecure': {'default': False, 'type': 'bool'},
        'external_host_partition_path': {'default': None, 'type': 'str'},
        'external_host_username': {'default': None, 'type': 'str'},
        'external_host_password': {'default': None, 'type': 'str', 'no_log': True},
        'external_host_private_key': {'default': None, 'type': 'str', 'no_log': True},
        # Stats
        'stats_user': {'default': None, 'type': 'str'},
        'stats_password': {'default': None, 'type': 'str', 'no_log': True},
        'stats_port': {'default': 1936, 'type': 'int'},
        # extra
        'cacert_file': {'default': None, 'type': 'str'},
        # edits
        'edits': {'default': [], 'type': 'list'},
    }
    module = AnsibleModule(
        argument_spec=argspec,
        mutually_exclusive=[["router_type", "images"],
                            ["key_file", "default_cert"],
                            ["cert_file", "default_cert"],
                            ["cacert_file", "default_cert"],
                           ],
        required_together=[['cacert_file', 'cert_file', 'key_file']],
        supports_check_mode=True,
    )
    results = Router.run_ansible(module.params, module.check_mode)
    if 'failed' in results:
        module.fail_json(**results)
    module.exit_json(**results)
# Run the module only when executed directly (Ansible invokes it this way).
if __name__ == '__main__':
    main()
# -*- -*- -*- End included fragment: ansible/oc_adm_router.py -*- -*- -*-
| apache-2.0 |
dcroc16/skunk_works | google_appengine/lib/django-1.4/tests/modeltests/proxy_models/tests.py | 33 | 12189 | from __future__ import absolute_import
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.core.exceptions import FieldError
from django.db import models, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.test import TestCase
from .models import (MyPerson, Person, StatusPerson, LowerStatusPerson,
MyPersonProxy, Abstract, OtherPerson, User, UserProxy, UserProxyProxy,
Country, State, StateProxy, TrackerUser, BaseUser, Bug, ProxyTrackerUser,
Improvement, ProxyProxyBug, ProxyBug, ProxyImprovement)
class ProxyModelTests(TestCase):
def test_same_manager_queries(self):
"""
The MyPerson model should be generating the same database queries as
the Person model (when the same manager is used in each case).
"""
my_person_sql = MyPerson.other.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
person_sql = Person.objects.order_by("name").query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
self.assertEqual(my_person_sql, person_sql)
def test_inheretance_new_table(self):
"""
The StatusPerson models should have its own table (it's using ORM-level
inheritance).
"""
sp_sql = StatusPerson.objects.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
p_sql = Person.objects.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
self.assertNotEqual(sp_sql, p_sql)
def test_basic_proxy(self):
"""
Creating a Person makes them accessible through the MyPerson proxy.
"""
person = Person.objects.create(name="Foo McBar")
self.assertEqual(len(Person.objects.all()), 1)
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(MyPerson.objects.get(name="Foo McBar").id, person.id)
self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name())
def test_no_proxy(self):
"""
Person is not proxied by StatusPerson subclass.
"""
Person.objects.create(name="Foo McBar")
self.assertEqual(list(StatusPerson.objects.all()), [])
def test_basic_proxy_reverse(self):
"""
A new MyPerson also shows up as a standard Person.
"""
MyPerson.objects.create(name="Bazza del Frob")
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(len(Person.objects.all()), 1)
LowerStatusPerson.objects.create(status="low", name="homer")
lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()]
self.assertEqual(lsps, ["homer"])
def test_correct_type_proxy_of_proxy(self):
"""
Correct type when querying a proxy of proxy
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
pp = sorted([mpp.name for mpp in MyPersonProxy.objects.all()])
self.assertEqual(pp, ['Bazza del Frob', 'Foo McBar', 'homer'])
def test_proxy_included_in_ancestors(self):
"""
Proxy models are included in the ancestors for a model's DoesNotExist
and MultipleObjectsReturned
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
self.assertRaises(Person.DoesNotExist,
MyPersonProxy.objects.get,
name='Zathras'
)
self.assertRaises(Person.MultipleObjectsReturned,
MyPersonProxy.objects.get,
id__lt=max_id+1
)
self.assertRaises(Person.DoesNotExist,
StatusPerson.objects.get,
name='Zathras'
)
sp1 = StatusPerson.objects.create(name='Bazza Jr.')
sp2 = StatusPerson.objects.create(name='Foo Jr.')
max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
self.assertRaises(Person.MultipleObjectsReturned,
StatusPerson.objects.get,
id__lt=max_id+1
)
def test_abc(self):
"""
All base classes must be non-abstract
"""
def build_abc():
class NoAbstract(Abstract):
class Meta:
proxy = True
self.assertRaises(TypeError, build_abc)
def test_no_cbc(self):
"""
The proxy must actually have one concrete base class
"""
def build_no_cbc():
class TooManyBases(Person, Abstract):
class Meta:
proxy = True
self.assertRaises(TypeError, build_no_cbc)
def test_no_base_classes(self):
def build_no_base_classes():
class NoBaseClasses(models.Model):
class Meta:
proxy = True
self.assertRaises(TypeError, build_no_base_classes)
def test_new_fields(self):
def build_new_fields():
class NoNewFields(Person):
newfield = models.BooleanField()
class Meta:
proxy = True
self.assertRaises(FieldError, build_new_fields)
def test_myperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in MyPerson.objects.all()]
self.assertEqual(resp, ['barney', 'fred'])
resp = [p.name for p in MyPerson._default_manager.all()]
self.assertEqual(resp, ['barney', 'fred'])
def test_otherperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in OtherPerson.objects.all()]
self.assertEqual(resp, ['barney', 'wilma'])
resp = [p.name for p in OtherPerson.excluder.all()]
self.assertEqual(resp, ['barney', 'fred'])
resp = [p.name for p in OtherPerson._default_manager.all()]
self.assertEqual(resp, ['barney', 'wilma'])
def test_permissions_created(self):
from django.contrib.auth.models import Permission
try:
Permission.objects.get(name="May display users information")
except Permission.DoesNotExist:
self.fail("The permission 'May display users information' has not been created")
def test_proxy_model_signals(self):
"""
Test save signals for proxy models
"""
output = []
def make_handler(model, event):
def _handler(*args, **kwargs):
output.append('%s %s save' % (model, event))
return _handler
h1 = make_handler('MyPerson', 'pre')
h2 = make_handler('MyPerson', 'post')
h3 = make_handler('Person', 'pre')
h4 = make_handler('Person', 'post')
signals.pre_save.connect(h1, sender=MyPerson)
signals.post_save.connect(h2, sender=MyPerson)
signals.pre_save.connect(h3, sender=Person)
signals.post_save.connect(h4, sender=Person)
dino = MyPerson.objects.create(name=u"dino")
self.assertEqual(output, [
'MyPerson pre save',
'MyPerson post save'
])
output = []
h5 = make_handler('MyPersonProxy', 'pre')
h6 = make_handler('MyPersonProxy', 'post')
signals.pre_save.connect(h5, sender=MyPersonProxy)
signals.post_save.connect(h6, sender=MyPersonProxy)
dino = MyPersonProxy.objects.create(name=u"pebbles")
self.assertEqual(output, [
'MyPersonProxy pre save',
'MyPersonProxy post save'
])
signals.pre_save.disconnect(h1, sender=MyPerson)
signals.post_save.disconnect(h2, sender=MyPerson)
signals.pre_save.disconnect(h3, sender=Person)
signals.post_save.disconnect(h4, sender=Person)
signals.pre_save.disconnect(h5, sender=MyPersonProxy)
signals.post_save.disconnect(h6, sender=MyPersonProxy)
def test_content_type(self):
ctype = ContentType.objects.get_for_model
self.assertTrue(ctype(Person) is ctype(OtherPerson))
def test_user_userproxy_userproxyproxy(self):
User.objects.create(name='Bruce')
resp = [u.name for u in User.objects.all()]
self.assertEqual(resp, ['Bruce'])
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
resp = [u.name for u in UserProxyProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
def test_proxy_for_model(self):
self.assertEqual(UserProxy, UserProxyProxy._meta.proxy_for_model)
def test_concrete_model(self):
self.assertEqual(User, UserProxyProxy._meta.concrete_model)
def test_proxy_delete(self):
"""
Proxy objects can be deleted
"""
User.objects.create(name='Bruce')
u2 = UserProxy.objects.create(name='George')
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce', 'George'])
u2.delete()
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
def test_select_related(self):
    """select_related() still pulls in related models for proxy querysets."""
    nsw = 'New South Wales'
    country = Country.objects.create(name='Australia')
    State.objects.create(name=nsw, country=country)
    self.assertEqual(
        [s.name for s in State.objects.select_related()], [nsw])
    self.assertEqual(
        [s.name for s in StateProxy.objects.select_related()], [nsw])
    self.assertEqual(StateProxy.objects.get(name=nsw).name, nsw)
    self.assertEqual(
        StateProxy.objects.select_related().get(name=nsw).name, nsw)
def test_proxy_bug(self):
    """Regression test: filters and select_related() work across FKs that
    point at (possibly nested) proxy models."""
    contributor = TrackerUser.objects.create(name='Contributor',
        status='contrib')
    someone = BaseUser.objects.create(name='Someone')
    Bug.objects.create(summary='fix this', version='1.1beta',
        assignee=contributor, reporter=someone)
    pcontributor = ProxyTrackerUser.objects.create(name='OtherContributor',
        status='proxy')
    # associated_bug is fetched through the proxy-of-proxy manager on purpose.
    Improvement.objects.create(summary='improve that', version='1.1beta',
        assignee=contributor, reporter=pcontributor,
        associated_bug=ProxyProxyBug.objects.all()[0])
    # Related field filter on proxy
    resp = ProxyBug.objects.get(version__icontains='beta')
    self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
    # Select related + filter on proxy
    resp = ProxyBug.objects.select_related().get(version__icontains='beta')
    self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
    # Proxy of proxy, select_related + filter
    resp = ProxyProxyBug.objects.select_related().get(
        version__icontains='beta'
    )
    self.assertEqual(repr(resp), '<ProxyProxyBug: ProxyProxyBug:fix this>')
    # Select related + filter on a related proxy field
    resp = ProxyImprovement.objects.select_related().get(
        reporter__name__icontains='butor'
    )
    self.assertEqual(repr(resp),
        '<ProxyImprovement: ProxyImprovement:improve that>'
    )
    # Select related + filter on a related proxy of proxy field
    resp = ProxyImprovement.objects.select_related().get(
        associated_bug__summary__icontains='fix'
    )
    self.assertEqual(repr(resp),
        '<ProxyImprovement: ProxyImprovement:improve that>'
    )
def test_proxy_load_from_fixture(self):
    """Fixture rows load correctly when queried through a proxy model."""
    # commit=False keeps loaddata inside the surrounding test transaction.
    management.call_command('loaddata', 'mypeople.json', verbosity=0, commit=False)
    p = MyPerson.objects.get(pk=100)
    self.assertEqual(p.name, 'Elvis Presley')
| mit |
SpiderNight/irc-to-discord | irc-to-discord.py | 1 | 2921 | import discord
import asyncio
import logging
import sys
import time
import threading
import json
import uniirc
import uniformatter
print(sys.version)
print(discord.__version__)

# Route discord.py's internal DEBUG logging to a file so the console stays clean.
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)

# Bridge configuration: each entry pairs an IRC channel with a Discord channel id.
with open("config.json") as fp:
    config = json.load(fp)
chan_pairs = [ (pair["irc_channel"], pair["discord_channel"]) for pair in config["pairs"] ]

client = discord.Client()
irc_client = uniirc.IRCClient(chan_pairs=chan_pairs, config=config["irc"], discord_client=client)
# Populated by on_ready once the IRC thread is started.
irc_thread = None
#notifying console that bot is logged in
@client.event
async def on_ready():
    """Start the IRC side once the Discord connection is fully ready."""
    print("Logged into discord as user: {}".format(client.user.name))
    # discord login successful so we can connect to IRC
    print("Starting IRC...")
    global irc_thread
    # Daemon thread: the IRC loop must not keep the process alive on its own.
    irc_thread = threading.Thread(target=irc_client.irc_run, daemon=True)
    irc_thread.start()
    default_status = "with your messages"
    print("Setting default status: {}".format(default_status))
    await client.change_presence(activity=discord.Game(name=default_status))
    return
#on message received, execute this block
@client.event
async def on_message(msg):
    """Relay a Discord message to the paired IRC channel.

    Messages sent by the bridge bot itself are ignored to avoid echo loops.
    """
    for irc_chan, discord_chan in chan_pairs:
        # Compare against our own user id instead of a hard-coded snowflake so
        # the bridge works for any bot account, not just the original deployment.
        if msg.channel.id == discord_chan and msg.author.id != client.user.id:
            await msg_process(msg, irc_chan)
    return
async def msg_process(msg, chan):
    """Format a Discord message and forward it to IRC channel *chan*."""
    # Prefer the server nickname when the author has one set.
    if msg.author.nick:
        author = msg.author.nick
    else:
        author = msg.author.name
    # Convert Discord markdown to IRC control codes.
    clean_msg = uniformatter.discordToIrc(msg.clean_content)
    # Insert a zero-width space so relayed names don't ping the Discord user.
    author = author[:1] + u'\u200b' + author[1:]
    # Deterministic per-nick colour: code-point sum mapped into IRC colours 02-13.
    colour = str((sum([ ord(x) for x in author ]) % 12) + 2)
    if len(colour) == 1:
        colour = "0" + colour
    if clean_msg:
        irc_client.send_message(chan, "<\x03{}{}\x03> {}".format(colour, author, clean_msg))
    # discord.py >= 1.0 (this file already uses its ``activity=`` API) exposes
    # attachments and embeds as objects, not dicts; the old subscript form
    # (attachment["filename"]) raised TypeError at runtime.
    for attachment in msg.attachments:
        irc_client.send_message(chan, "<\x03{}{}\x03> \x02{}:\x0F {}".format(colour, author, attachment.filename, attachment.url))
    for embed in msg.embeds:
        irc_client.send_message(chan, "<\x03{}{}\x03> \x02{}:\x0F {}".format(colour, author, embed.title, embed.url))
    return
#if irc thread has died then main program exits too
async def irc_checker():
    """Watchdog: poll the IRC thread and terminate the bridge if it dies."""
    await client.wait_until_ready()
    # ``is_closed`` is a method in discord.py >= 1.0; without the call the
    # truthy bound method made ``not client.is_closed`` always False, so the
    # watchdog loop never ran at all.
    while not client.is_closed():
        if irc_thread and not irc_thread.is_alive():
            exit("IRC client disconnected. Exiting...")
        await asyncio.sleep(10)
    return
print("Starting Discord...")
loop = asyncio.get_event_loop()
try:
    # login/connect split (instead of client.run) lets us schedule the
    # watchdog task on the same loop before the gateway connection starts.
    loop.create_task(irc_checker())
    loop.run_until_complete(client.login(config["discord"]["login_token"]))
    loop.run_until_complete(client.connect())
except Exception:
    # Any fatal error: close the Discord session cleanly before exiting.
    loop.run_until_complete(client.close())
finally:
    loop.close()
| mit |
akappner/ios | automation_Test/actions.py | 14 | 4037 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
import time
import os
from time import sleep
import constants as const
import loginView
import helpGuideView
import filesView
from appium import webdriver
def getWebDriver():
    """Create an Appium Remote webdriver configured from the constants module."""
    caps = {
        'appium-version': const.K_APPIUM_VER,
        'platformName': const.K_APP_PLATFORM_NAME,
        'platformVersion': const.K_APP_PLATFORM_VER,
        'deviceName': const.K_DEVICE_NAME,
        'app': os.path.dirname(os.path.realpath(__file__)) + const.K_APP_FILE_NAME,
    }
    driver = webdriver.Remote(const.K_WD_REMOTE_URL, caps)
    driver.implicitly_wait(const.K_WD_EXPLICITY_WAIT)
    return driver
def doFirstLoginWith(self, server, user, password, ssl):
    """Skip the first-run help guide, then perform a normal login."""
    driver = self.driver
    skipButtonInHelpGuide = driver.find_elements_by_class_name(helpGuideView.skipButton_class)[helpGuideView.skipButton_index]
    # Sanity-check we really found the Skip button before tapping it.
    self.assertEqual(skipButtonInHelpGuide.get_attribute("name"), helpGuideView.skipButton_name)
    skipButtonInHelpGuide.click()
    doLoginWith(self, server, user, password, ssl)
def doLoginWith(self, server, user, password, ssl):
    """Fill the login form, accept the SSL prompt if needed, and submit."""
    driver = self.driver
    user_field = driver.find_elements_by_class_name(loginView.user_field_class)[loginView.user_field_index]
    user_field.clear()
    user_field.set_value(user)
    pass_field = driver.find_elements_by_class_name(loginView.pass_field_class)[loginView.pass_field_index]
    pass_field.clear()
    pass_field.set_value(password)
    url_field = driver.find_elements_by_class_name(loginView.url_field_class)[loginView.url_field_index]
    url_field.clear()
    url_field.set_value(server)
    # Tap outside the fields: dismisses the keyboard and triggers URL validation.
    table_view = driver.find_elements_by_class_name(loginView.table_view_class)[loginView.table_view_index]
    table_view.click()
    sleep(3)
    if ssl == True:
        # Self-signed-certificate warning: confirm it before continuing.
        ok_button_alert_view = driver.find_element_by_xpath(loginView.ok_button_alert_view_xpath)
        ok_button_alert_view.click()
        sleep(3)
    if ssl == True:
        text_to_check = loginView.secure_connection_name
    else:
        text_to_check = loginView.connection_name
    # Wait up to a minute for the connection-status label to confirm the server.
    time_out = 60
    sleep_time = 1
    class_to_check = loginView.connection_field_class
    index_to_check = loginView.connection_field_index
    wait_until(check_values_by_name, time_out, sleep_time, driver, class_to_check, index_to_check, text_to_check)
    self.assertEqual(driver.find_elements_by_class_name(class_to_check)[index_to_check].get_attribute("name"), text_to_check)
    login_button = driver.find_elements_by_class_name(loginView.login_button_class)[loginView.login_button_index]
    login_button.click()
def wait_until(some_method, timeout, period=0.25, *args, **kwargs):
    """Poll *some_method* until it returns a truthy value or *timeout* expires.

    Returns True as soon as ``some_method(*args, **kwargs)`` is truthy, and
    False once the deadline passes.  Uses a monotonic clock so the wait is
    immune to system clock adjustments, and always evaluates the predicate
    at least once even for a non-positive timeout.
    """
    deadline = time.monotonic() + timeout
    while True:
        if some_method(*args, **kwargs):
            return True
        if time.monotonic() >= deadline:
            return False
        time.sleep(period)
def check_values_by_class_name(driver, class_name, exp_value):
    """Return True iff *driver* currently shows exactly *exp_value* elements
    of class *class_name* (used as a wait_until predicate)."""
    # Return the comparison directly instead of the if/return-True idiom.
    return len(driver.find_elements_by_class_name(class_name)) == exp_value
def check_values_by_name(driver, class_name, index, exp_name):
    """Return True iff the *index*-th element of class *class_name* has the
    accessibility name *exp_name* (used as a wait_until predicate)."""
    element = driver.find_elements_by_class_name(class_name)[index]
    return element.get_attribute("name") == exp_name
def assert_is_in_files_view(self):
    """Assert that the files view (identified by its tab bar) is on screen."""
    sleep(1)
    class_to_check = filesView.tabBar_class
    time_out = 20
    sleep_time = 1
    expected_class_found = 1  # exactly one tab bar => files view is showing
    wait_until(check_values_by_class_name, time_out, sleep_time, self.driver, class_to_check, expected_class_found)
    self.assertTrue(check_values_by_class_name(self.driver, class_to_check, expected_class_found))
def assert_is_not_in_files_view(self):
    """Assert that the files view (identified by its tab bar) is NOT on screen."""
    sleep(1)
    class_to_check = filesView.tabBar_class
    time_out = 20
    sleep_time = 1
    expected_class_found = 0  # zero tab bars => files view is not showing
    wait_until(check_values_by_class_name, time_out, sleep_time, self.driver, class_to_check, expected_class_found)
    self.assertTrue(check_values_by_class_name(self.driver, class_to_check, expected_class_found))
| gpl-3.0 |
dannyboi104/SickRage | lib/unidecode/x0c7.py | 253 | 4564 | data = (
'wek', # 0x00
'wet', # 0x01
'wep', # 0x02
'weh', # 0x03
'wi', # 0x04
'wig', # 0x05
'wigg', # 0x06
'wigs', # 0x07
'win', # 0x08
'winj', # 0x09
'winh', # 0x0a
'wid', # 0x0b
'wil', # 0x0c
'wilg', # 0x0d
'wilm', # 0x0e
'wilb', # 0x0f
'wils', # 0x10
'wilt', # 0x11
'wilp', # 0x12
'wilh', # 0x13
'wim', # 0x14
'wib', # 0x15
'wibs', # 0x16
'wis', # 0x17
'wiss', # 0x18
'wing', # 0x19
'wij', # 0x1a
'wic', # 0x1b
'wik', # 0x1c
'wit', # 0x1d
'wip', # 0x1e
'wih', # 0x1f
'yu', # 0x20
'yug', # 0x21
'yugg', # 0x22
'yugs', # 0x23
'yun', # 0x24
'yunj', # 0x25
'yunh', # 0x26
'yud', # 0x27
'yul', # 0x28
'yulg', # 0x29
'yulm', # 0x2a
'yulb', # 0x2b
'yuls', # 0x2c
'yult', # 0x2d
'yulp', # 0x2e
'yulh', # 0x2f
'yum', # 0x30
'yub', # 0x31
'yubs', # 0x32
'yus', # 0x33
'yuss', # 0x34
'yung', # 0x35
'yuj', # 0x36
'yuc', # 0x37
'yuk', # 0x38
'yut', # 0x39
'yup', # 0x3a
'yuh', # 0x3b
'eu', # 0x3c
'eug', # 0x3d
'eugg', # 0x3e
'eugs', # 0x3f
'eun', # 0x40
'eunj', # 0x41
'eunh', # 0x42
'eud', # 0x43
'eul', # 0x44
'eulg', # 0x45
'eulm', # 0x46
'eulb', # 0x47
'euls', # 0x48
'eult', # 0x49
'eulp', # 0x4a
'eulh', # 0x4b
'eum', # 0x4c
'eub', # 0x4d
'eubs', # 0x4e
'eus', # 0x4f
'euss', # 0x50
'eung', # 0x51
'euj', # 0x52
'euc', # 0x53
'euk', # 0x54
'eut', # 0x55
'eup', # 0x56
'euh', # 0x57
'yi', # 0x58
'yig', # 0x59
'yigg', # 0x5a
'yigs', # 0x5b
'yin', # 0x5c
'yinj', # 0x5d
'yinh', # 0x5e
'yid', # 0x5f
'yil', # 0x60
'yilg', # 0x61
'yilm', # 0x62
'yilb', # 0x63
'yils', # 0x64
'yilt', # 0x65
'yilp', # 0x66
'yilh', # 0x67
'yim', # 0x68
'yib', # 0x69
'yibs', # 0x6a
'yis', # 0x6b
'yiss', # 0x6c
'ying', # 0x6d
'yij', # 0x6e
'yic', # 0x6f
'yik', # 0x70
'yit', # 0x71
'yip', # 0x72
'yih', # 0x73
'i', # 0x74
'ig', # 0x75
'igg', # 0x76
'igs', # 0x77
'in', # 0x78
'inj', # 0x79
'inh', # 0x7a
'id', # 0x7b
'il', # 0x7c
'ilg', # 0x7d
'ilm', # 0x7e
'ilb', # 0x7f
'ils', # 0x80
'ilt', # 0x81
'ilp', # 0x82
'ilh', # 0x83
'im', # 0x84
'ib', # 0x85
'ibs', # 0x86
'is', # 0x87
'iss', # 0x88
'ing', # 0x89
'ij', # 0x8a
'ic', # 0x8b
'ik', # 0x8c
'it', # 0x8d
'ip', # 0x8e
'ih', # 0x8f
'ja', # 0x90
'jag', # 0x91
'jagg', # 0x92
'jags', # 0x93
'jan', # 0x94
'janj', # 0x95
'janh', # 0x96
'jad', # 0x97
'jal', # 0x98
'jalg', # 0x99
'jalm', # 0x9a
'jalb', # 0x9b
'jals', # 0x9c
'jalt', # 0x9d
'jalp', # 0x9e
'jalh', # 0x9f
'jam', # 0xa0
'jab', # 0xa1
'jabs', # 0xa2
'jas', # 0xa3
'jass', # 0xa4
'jang', # 0xa5
'jaj', # 0xa6
'jac', # 0xa7
'jak', # 0xa8
'jat', # 0xa9
'jap', # 0xaa
'jah', # 0xab
'jae', # 0xac
'jaeg', # 0xad
'jaegg', # 0xae
'jaegs', # 0xaf
'jaen', # 0xb0
'jaenj', # 0xb1
'jaenh', # 0xb2
'jaed', # 0xb3
'jael', # 0xb4
'jaelg', # 0xb5
'jaelm', # 0xb6
'jaelb', # 0xb7
'jaels', # 0xb8
'jaelt', # 0xb9
'jaelp', # 0xba
'jaelh', # 0xbb
'jaem', # 0xbc
'jaeb', # 0xbd
'jaebs', # 0xbe
'jaes', # 0xbf
'jaess', # 0xc0
'jaeng', # 0xc1
'jaej', # 0xc2
'jaec', # 0xc3
'jaek', # 0xc4
'jaet', # 0xc5
'jaep', # 0xc6
'jaeh', # 0xc7
'jya', # 0xc8
'jyag', # 0xc9
'jyagg', # 0xca
'jyags', # 0xcb
'jyan', # 0xcc
'jyanj', # 0xcd
'jyanh', # 0xce
'jyad', # 0xcf
'jyal', # 0xd0
'jyalg', # 0xd1
'jyalm', # 0xd2
'jyalb', # 0xd3
'jyals', # 0xd4
'jyalt', # 0xd5
'jyalp', # 0xd6
'jyalh', # 0xd7
'jyam', # 0xd8
'jyab', # 0xd9
'jyabs', # 0xda
'jyas', # 0xdb
'jyass', # 0xdc
'jyang', # 0xdd
'jyaj', # 0xde
'jyac', # 0xdf
'jyak', # 0xe0
'jyat', # 0xe1
'jyap', # 0xe2
'jyah', # 0xe3
'jyae', # 0xe4
'jyaeg', # 0xe5
'jyaegg', # 0xe6
'jyaegs', # 0xe7
'jyaen', # 0xe8
'jyaenj', # 0xe9
'jyaenh', # 0xea
'jyaed', # 0xeb
'jyael', # 0xec
'jyaelg', # 0xed
'jyaelm', # 0xee
'jyaelb', # 0xef
'jyaels', # 0xf0
'jyaelt', # 0xf1
'jyaelp', # 0xf2
'jyaelh', # 0xf3
'jyaem', # 0xf4
'jyaeb', # 0xf5
'jyaebs', # 0xf6
'jyaes', # 0xf7
'jyaess', # 0xf8
'jyaeng', # 0xf9
'jyaej', # 0xfa
'jyaec', # 0xfb
'jyaek', # 0xfc
'jyaet', # 0xfd
'jyaep', # 0xfe
'jyaeh', # 0xff
)
| gpl-3.0 |
monikasulik/django-oscar | src/oscar/apps/address/migrations/0001_initial.py | 58 | 4480 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oscar.models.fields
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the address app: Country and UserAddress tables.

    Auto-generated by Django's makemigrations; do not edit operations by hand.
    """

    dependencies = [
        # UserAddress holds an FK to the (possibly swapped) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Country',
            fields=[
                ('iso_3166_1_a2', models.CharField(primary_key=True, max_length=2, verbose_name='ISO 3166-1 alpha-2', serialize=False)),
                ('iso_3166_1_a3', models.CharField(max_length=3, verbose_name='ISO 3166-1 alpha-3', blank=True)),
                ('iso_3166_1_numeric', models.CharField(max_length=3, verbose_name='ISO 3166-1 numeric', blank=True)),
                ('printable_name', models.CharField(max_length=128, verbose_name='Country name')),
                ('name', models.CharField(max_length=128, verbose_name='Official name')),
                ('display_order', models.PositiveSmallIntegerField(default=0, verbose_name='Display order', db_index=True, help_text='Higher the number, higher the country in the list.')),
                ('is_shipping_country', models.BooleanField(default=False, db_index=True, verbose_name='Is shipping country')),
            ],
            options={
                'ordering': ('-display_order', 'printable_name'),
                'verbose_name_plural': 'Countries',
                'verbose_name': 'Country',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='UserAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(verbose_name='Title', max_length=64, blank=True, choices=[('Mr', 'Mr'), ('Miss', 'Miss'), ('Mrs', 'Mrs'), ('Ms', 'Ms'), ('Dr', 'Dr')])),
                ('first_name', models.CharField(max_length=255, verbose_name='First name', blank=True)),
                ('last_name', models.CharField(max_length=255, verbose_name='Last name', blank=True)),
                ('line1', models.CharField(max_length=255, verbose_name='First line of address')),
                ('line2', models.CharField(max_length=255, verbose_name='Second line of address', blank=True)),
                ('line3', models.CharField(max_length=255, verbose_name='Third line of address', blank=True)),
                ('line4', models.CharField(max_length=255, verbose_name='City', blank=True)),
                ('state', models.CharField(max_length=255, verbose_name='State/County', blank=True)),
                ('postcode', oscar.models.fields.UppercaseCharField(max_length=64, verbose_name='Post/Zip-code', blank=True)),
                ('search_text', models.TextField(editable=False, verbose_name='Search text - used only for searching addresses')),
                ('phone_number', oscar.models.fields.PhoneNumberField(verbose_name='Phone number', help_text='In case we need to call you about your order', blank=True)),
                ('notes', models.TextField(verbose_name='Instructions', help_text='Tell us anything we should know when delivering your order.', blank=True)),
                ('is_default_for_shipping', models.BooleanField(default=False, verbose_name='Default shipping address?')),
                ('is_default_for_billing', models.BooleanField(default=False, verbose_name='Default billing address?')),
                ('num_orders', models.PositiveIntegerField(default=0, verbose_name='Number of Orders')),
                ('hash', models.CharField(max_length=255, editable=False, db_index=True, verbose_name='Address Hash')),
                ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
                ('country', models.ForeignKey(verbose_name='Country', to='address.Country')),
                ('user', models.ForeignKey(verbose_name='User', related_name='addresses', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-num_orders'],
                'verbose_name_plural': 'User addresses',
                'verbose_name': 'User address',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.AlterUniqueTogether(
            name='useraddress',
            unique_together=set([('user', 'hash')]),
        ),
    ]
| bsd-3-clause |
amcdunn/CloudMining | csv/reload.py | 1 | 1152 | import csv
import MySQLdb
import getpass
import traceback
# Load a CSV file (3 header rows: column name / SQL type / comment) into a
# freshly re-created MySQL table.  Python 2 script (``file()`` and the bare
# ``print`` statement below).
mydb = MySQLdb.connect(host='sql.mit.edu',
                       user='amcdunn',
                       passwd=getpass.getpass("Password for sql.mit.edu:"),
                       db='amcdunn+LL_dictionary')
cursor = mydb.cursor()
test_csv_path = "reload_test.csv"
num_header_rows = 3
tbl_name = "reload_test"
# NOTE(review): table/column names and row values are spliced into SQL with
# string formatting -- SQL injection risk if the CSV is untrusted; the INSERT
# below should use parameterized queries at minimum.
create = "DROP TABLE IF EXISTS {0}; \
CREATE TABLE {0} (".format(tbl_name)
csv_data = csv.reader(file(test_csv_path,'rU'))
header_rows = []
for counter, row in enumerate(csv_data):
    header_rows.append(row)
    if counter==num_header_rows-1:
        break
# Transpose the header rows: one (name, type, comment) tuple per column.
col_data = zip(*header_rows)
for col in col_data:
    create += "{} {} COMMENT \'{}\',".format(*col)
# Drop the trailing comma and close the DDL statement.
create = create[:-1]+") CHARACTER SET ascii; "
print("Command sent to MySQL: "+create)
cursor.execute(create)
cursor.close()
#done with create table
cursor = mydb.cursor()
# The reader resumes after the header rows consumed above.
for row in csv_data:
    try:
        cursor.execute('INSERT INTO {}'.format(tbl_name)+" VALUES{}".format(tuple(row)))
        mydb.commit()
    except Exception:
        # Best-effort load: log the failing row and keep going.
        traceback.print_exc()
        print("Error adding this row to database:\n"+" ".join(row))
cursor.close()
print "Done"
| agpl-3.0 |
jnvandermeer/PythonFeedback | idlex-1.11.2/build/lib/idlexlib/extensions/SearchBar.py | 3 | 35050 | # IDLEX EXTENSION
## """SearchBar.py - An IDLE extension for searching for text in windows.
##
## Copyright (c) 2011 Tal Einat
## All rights reserved.
##
## Developed by: Tal Einat
##
## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal with the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
##
## Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimers.
##
## Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimers in the documentation
## and/or other materials provided with the distribution.
##
## Neither the name of Tal Einat, nor the names of its contributors may be
## used to endorse or promote products derived from this Software without
## specific prior written permission.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
## IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR
## ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
## TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
## OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
##
##
##
## The interface is a small bar which appears on the bottom of the window,
## and dissapears when the user stops searching.
##
## This extension implements the usual search options, as well as regular
## expressions.
##
## Another nice feature is that while searching all matches are highlighted.
##
##
## Original Author: Tal Einat
##
## Modified by Roger D. Serwy to work with idlex and Python 3,
## as well as some bugfixes and improvements.
##
##
##
## """
# Default configuration block merged into IDLE's extension configuration.
config_extension_def = """
[SearchBar]
enable=1
is_incremental=1
reg_exp=0
match_case=0
whole_word=0
wrap_around=0
[Searchbar_cfgBindings]
toggle-search-bar=
"""
import time
import string
import re
import sys
if sys.version < '3':
import Tkinter
from Tkconstants import TOP, BOTTOM, LEFT, RIGHT, X, NONE
else:
import tkinter as Tkinter
from tkinter.constants import TOP, BOTTOM, LEFT, RIGHT, X, NONE
EXTNAME = 'SearchBar'
from idlelib.configHandler import idleConf
from idlelib.SearchEngine import SearchEngine
class SearchBarSearchEngine(SearchEngine):
    """ Silence regex errors.
    Incremental highlighting doesn't play well with mal-formed regex.
    """
    def __init__(self, *args, **kw):
        SearchEngine.__init__(self, *args, **kw)
        # Optional override for report_error while a bar is visible.
        self._error_callback_ptr = None
        # True only while an as-you-type (incremental) search is running.
        self._incrementalSearch = False
        # Tk variables backing the option checkbuttons, traced as a group.
        self.varlist = [self.revar, self.casevar, self.wordvar, self.wrapvar]
        self.tracelist = []

    def report_error(self, pat, msg, col=-1):
        #print('report_error', pat, msg, col,self._incrementalSearch)
        # During incremental search, route errors to the bar's callback (or
        # swallow them) instead of raising SearchEngine's modal dialog.
        if self._incrementalSearch:
            if self._error_callback_ptr:
                return self._error_callback_ptr(pat, msg, col)
            else:
                return None
        else:
            return SearchEngine.report_error(self, pat, msg, col)

    def error_callback(self, ptr):
        # This is set by FindBar and ReplaceBar instances so that it
        # calls the correct callback
        self._error_callback_ptr = ptr

    def load_cfg(self):
        # Load settings from configuration handler - RDS 2012-02-03
        self.revar.set(get_cfg('reg_exp', default=False))
        self.casevar.set(get_cfg('match_case', default=False))
        self.wordvar.set(get_cfg('whole_word', default=False))
        self.wrapvar.set(get_cfg('wrap_around', default=False))

    def save_cfg(self):
        # Persist the option checkbuttons back to the configuration file.
        set_cfg('reg_exp', '%s' % self.revar.get())
        set_cfg('match_case', '%s' % self.casevar.get())
        set_cfg('whole_word', '%s' % self.wordvar.get())
        set_cfg('wrap_around', '%s' % self.wrapvar.get())

    def set_var_trace(self, ptr):
        # Fire *ptr* whenever any search option variable is written.
        obs = []
        for v in self.varlist:
            obs.append(v.trace("w", ptr))
        # NOTE(review): on Python 3 this zip() is a one-shot iterator --
        # confirm remove_var_trace is never expected to run twice in a row.
        self.tracelist = zip(obs, self.varlist)

    def remove_var_trace(self):
        # Detach the traces installed by set_var_trace.
        for obs, v in self.tracelist:
            v.trace_vdelete('w', obs)
        self.tracelist = []
def get_cfg(cfg, type="bool", default=True):
    """Read one SearchBar option from IDLE's extension configuration."""
    return idleConf.GetOption("extensions", EXTNAME,
                              cfg, type=type, default=default)

def set_cfg(cfg, b):
    """Write one SearchBar option back to IDLE's extension configuration."""
    return idleConf.SetOption("extensions", EXTNAME,
                              cfg,'%s' % b)
class SearchBar:
    """Editor-window extension: wires the <<find>>/<<replace>> virtual events
    to a FindBar and a ReplaceBar sharing one search engine."""

    menudefs = []  # all bindings come from cfgBindings; no menu entries

    def __init__(self, editwin):
        text = editwin.text
        # One shared engine backs both bars so pattern/options stay in sync.
        self.engine = engine = SearchBarSearchEngine(text)
        self.fb = find_bar = FindBar(editwin, editwin.status_bar, engine)
        self.rb = replace_bar = ReplaceBar(editwin, editwin.status_bar, engine)

        def find_event(event):
            # Swap bars: hide replace (without stealing focus), show find.
            replace_bar.hide_findbar_event(event, focus=False)
            find_bar.show_findbar_event(event)
            return "break"
        text.bind("<<find>>", find_event)

        def find_again_event(event):
            find_bar.search_again_event(event)
            return "break"
        text.bind("<<find-again>>", find_again_event)

        def find_selection_event(event):
            find_bar.search_selection_event(event)
            return "break"
        text.bind("<<find-selection>>", find_selection_event)

        def replace_event(event):
            # Swap bars: hide find, show replace.
            find_bar.hide_findbar_event(event, focus=False)
            replace_bar.show_findbar_event(event)
            return "break"
        text.bind("<<replace>>", replace_event)

    def close(self):
        # Called by the editor on shutdown; persist the option checkboxes.
        self.engine.save_cfg()
def FindBar(editwin, pack_after, engine):
    """Build the search-only bar (no replace controls)."""
    return SearchBarWidget(editwin, pack_after, engine, is_replace=False)

def ReplaceBar(editwin, pack_after, engine):
    """Build the search bar with the With: entry and replace buttons."""
    return SearchBarWidget(editwin, pack_after, engine, is_replace=True)
class SearchBarWidget:
def __init__(self, editwin, pack_after, engine, is_replace=False):
    """Set up bar state; Tk widgets themselves are created lazily on show."""
    self.text = editwin.text
    self.root = self.text._root()
    self.engine = engine
    self.window_engine = get_window_engine(editwin)
    self.is_replace = is_replace  # replace bar adds the With: entry + buttons
    self.top = editwin.top
    self.pack_after = pack_after  # widget the bar packs itself below
    self.widgets_built = False
    self.shown = False
    self.find_var = Tkinter.StringVar(self.root)
    # The text widget's selection isn't shown when it doesn't have the
    # focus. Let's replicate it so it will be seen while searching as well.
    self.text.tag_configure("findsel",
        background=self.text.tag_cget("sel","background"),
        foreground=self.text.tag_cget("sel","foreground"))
    self._is_incremental = None  # None => fall back to the config option
    self._expand_state = None
    self.text.bind('<FocusIn>', self.text_focusin_event, '+')
def toggle_search_bar_event(self, event=None):  # RDS - 2011-10-18
    """Forward the toggle keybinding to the shared <<find>> virtual event."""
    self.text.event_generate('<<find>>')
    return "break"
def _show(self):
    """Pack the bar, mark all matches, and hook up the engine callbacks."""
    if not self.widgets_built:
        self._build_widgets()
    if not self.shown:
        self.bar_frame.pack(side=BOTTOM, fill=X, expand=0, pady=1,
                            after=self.pack_after)
        self.window_engine.show_find_marks()
        self.shown = True  # must be _before_ reset_selection()!
        # Add the "findsel" tag, which looks like the selection
        self._reset_selection()
    # Snapshot the incremental setting for the lifetime of this showing.
    self._is_incremental = self.is_incremental()
    self._expand_state = None
    self.engine.error_callback(self._error_callback)
    self.engine.load_cfg()
    self.engine.set_var_trace(self._incremental_callback)
def _hide(self, setcursor=False):
    """Unpack the bar and restore the text widget's normal state.

    When *setcursor* is true, move the insertion cursor to the last
    match/selection so the user lands where the search left off.
    """
    if self.widgets_built and self.shown:
        v = self.text.yview()
        self.bar_frame.pack_forget()
        self.text.update_idletasks()
        try:
            # Re-assert the scroll position; unpacking can shift the view.
            self.text.yview_moveto(v[0])  # Tkinter work-around
        except Exception as err:  # This should never happen
            print('SearchBar._hide', err)
        self.window_engine.reset()
        self.window_engine.hide_find_marks()
        sel = self._get_selection()
        self.shown = False  # must be _after_ get_selection()!
        if setcursor:
            if sel:
                self._set_selection(sel[0], sel[1])
                self.text.mark_set("insert", sel[0])
            else:
                self._reset_selection()
            self.text.see("insert")
        # Drop the selection look-alike tag everywhere.
        self.text.tag_remove("findsel","1.0","end")
    self._is_incremental = None
    self._expand_state = None
    self.engine.error_callback(None)
    self.engine.save_cfg()
    self.engine.remove_var_trace()
def _error_callback(self, pat, msg, col=-1):
    """Callback for SearchBarSearchEngine.report_error during incremental
    search: silently clear the match highlighting instead of popping the
    modal error dialog.  (Removed a dead trailing ``pass``.)"""
    self.window_engine.reset()
def is_incremental(self):
    """Effective incremental-search flag: runtime override if set, else the
    persisted configuration option."""
    override = self._is_incremental
    if override is not None:
        return override
    return get_cfg("is_incremental", default=False)
def _incremental_callback(self, *args):
    """Variable-trace callback: re-run the search as the pattern or any
    option changes while the bar is shown."""
    # Flag suppresses the engine's modal error dialog for partial patterns.
    self.engine._incrementalSearch = True
    if self.shown and self.is_incremental():
        if self.find_var.get():
            self._safe_search(start=self.text.index("insert"))
        else:
            # Empty pattern: clear highlighting rather than matching nothing.
            self.window_engine.reset()
            self._clear_selection()
            self.text.see("insert")
    self.engine._incrementalSearch = False
def _build_widgets(self):
if not self.widgets_built:
def _make_entry(parent, label, var):
l = Tkinter.Label(parent, text=label)
l.pack(side=LEFT, fill=NONE, expand=0)
e = Tkinter.Entry(parent, textvariable=var, exportselection=0,
width=30, border=1)
e.pack(side=LEFT, fill=NONE, expand=0)
e.bind("<Escape>", self.hide_findbar_event)
return e
def _make_checkbutton(parent, label, var):
btn = Tkinter.Checkbutton(parent, anchor="w",
text=label, variable=var)
btn.pack(side=LEFT, fill=NONE, expand=0)
btn.bind("<Escape>", self.hide_findbar_event)
return btn
def _make_button(parent, label, command):
btn = Tkinter.Button(parent, text=label, command=command)
btn.pack(side=LEFT, fill=NONE, expand=0)
btn.bind("<Escape>", self.hide_findbar_event)
return btn
# Frame for the entire bar
self.bar_frame = Tkinter.Frame(self.top, border=1, relief="flat")
# Frame for the 'Find:' / 'Replace:' entry and direction
self.find_frame = Tkinter.Frame(self.bar_frame, border=0)
# Frame for the 'Find:' options
self.find_frame_options = Tkinter.Frame(self.bar_frame, border=0) # RDS - 2011-11-12
tabstop_top = Tkinter.Label(self.find_frame, takefocus=1, text='',
highlightthickness=0)
tabstop_top.pack(side=LEFT)
# 'Find:' / 'Replace:' entry
if not self.is_replace: tmp = "Find:"
else: tmp = "Replace:"
self.find_ent = _make_entry(self.find_frame,
tmp, self.find_var)
# Regular expression checkbutton
btn = _make_checkbutton(self.find_frame_options,
"Reg-Exp", self.engine.revar)
if self.engine.isre():
btn.select()
self.reg_btn = btn
# Match case checkbutton
btn = _make_checkbutton(self.find_frame_options,
"Match case", self.engine.casevar)
if self.engine.iscase():
btn.select()
self.case_btn = btn
# Whole word checkbutton
btn = _make_checkbutton(self.find_frame_options,
"Whole word", self.engine.wordvar)
if self.engine.isword():
btn.select()
self.word_btn = btn
# Wrap checkbutton
btn = _make_checkbutton(self.find_frame_options,
"Wrap around", self.engine.wrapvar)
if self.engine.iswrap():
btn.select()
self.wrap_btn = btn
# Direction checkbutton
Tkinter.Label(self.find_frame, text="Direction:").pack(side=LEFT,
fill=NONE,
expand=0,padx=6)
self.direction_txt_var = Tkinter.StringVar(self.root)
btn = Tkinter.Checkbutton(self.find_frame,
textvariable=self.direction_txt_var,
variable=self.engine.backvar,
command=self._update_direction_button,
indicatoron=0,
width=5,
)
btn.config(selectcolor=btn.cget("bg"))
btn.pack(side=LEFT, fill=NONE, expand=0)
if self.engine.isback():
btn.select()
self.direction_txt_var.set("Up")
else:
btn.deselect()
self.direction_txt_var.set("Down")
btn.bind("<Escape>",self.hide_findbar_event)
self.direction_btn = btn
self.find_frame.pack(side=TOP, fill=X, expand=1)
self.find_frame_options.pack(side=TOP, fill=X, expand=1)
if self.is_replace:
# Frame for the 'With:' entry + replace options
self.replace_frame = Tkinter.Frame(self.bar_frame, border=0)
self.replace_frame_buttons = Tkinter.Frame(self.bar_frame, border=0)
tmp = Tkinter.Label(self.replace_frame, takefocus=0, text='',
highlightthickness=0)
tmp.pack(side=LEFT)
self.replace_with_var = Tkinter.StringVar(self.root)
self.replace_ent = _make_entry(self.replace_frame,"With:",
self.replace_with_var)
self.find_btn = _make_button(self.replace_frame_buttons, "Find",
self._search)
self.replace_btn = _make_button(self.replace_frame_buttons, "Replace",
self._replace_event)
self.replace_find_btn = _make_button(self.replace_frame_buttons, "Replace+Find",
self._replace_find_event)
self.replace_all_btn = _make_button(self.replace_frame_buttons, "Replace All",
self._replace_all_event)
self.replace_frame.pack(side=TOP, fill=X, expand=0)
self.replace_frame_buttons.pack(side=TOP, fill=X, expand=0)
self.widgets_built = True
# Key bindings for the 'Find:' / 'Replace:' Entry widget
self.find_ent.bind("<Control-Key-f>", self._safe_search)
self.find_ent.bind("<Control-Key-g>", self._safe_search)
self.find_ent.bind("<Control-Key-R>", self._toggle_reg_event)
self.find_ent.bind("<Control-Key-C>", self._toggle_case_event)
self.find_ent.bind("<Control-Key-W>", self._toggle_wrap_event)
self.find_ent.bind("<Control-Key-D>", self._toggle_direction_event)
self.find_ent_expander = EntryExpander(self.find_ent, self.text)
self.find_ent_expander.bind("<Alt-Key-slash>")
callback = self.find_ent._register(self._incremental_callback)
self.find_ent.tk.call("trace", "variable", self.find_var, "w",
callback)
keySetName = idleConf.CurrentKeys()
find_bindings = idleConf.GetKeyBinding(keySetName, '<<find-again>>')
for key_event in find_bindings:
self.find_ent.bind(key_event, self._search) # RDS - 2011-11-03
if not self.is_replace:
# Key bindings for the 'Find:' Entry widget
self.find_ent.bind("<Return>", self._safe_search)
def tab_fix1(ev):
if ev.state & 1 == 0: # Windows Fix
self.find_ent.focus()
return "break"
self.wrap_btn.bind('<Tab>', tab_fix1)
def tab_fix2(ev):
self.wrap_btn.focus()
return "break"
tabstop_top.bind('<FocusIn>', tab_fix2)
else:
# Key bindings for the 'Replace:' Entry widget
self.find_ent.bind("<Return>", self._replace_bar_find_entry_return_event)
# Key bindings for the 'With:' Entry widget
self.replace_ent.bind("<Return>", self._replace_event)
self.replace_ent.bind("<Shift-Return>", self._safe_search)
self.replace_ent.bind("<Control-Key-f>", self._safe_search)
self.replace_ent.bind("<Control-Key-g>", self._safe_search)
self.replace_ent.bind("<Control-Key-R>", self._toggle_reg_event)
self.replace_ent.bind("<Control-Key-C>", self._toggle_case_event)
self.replace_ent.bind("<Control-Key-W>", self._toggle_wrap_event)
self.replace_ent.bind("<Control-Key-D>", self._toggle_direction_event)
self.replace_ent_expander = EntryExpander(self.replace_ent,
self.text)
self.replace_ent_expander.bind("<Alt-Key-slash>")
for key_event in find_bindings:
self.replace_ent.bind(key_event, self._search) # RDS - 2011-11-19
def tab_fix1(ev):
if ev.state & 1 == 0: # Windows Fix
self.find_ent.focus()
return "break"
self.replace_all_btn.bind('<Tab>', tab_fix1)
def tab_fix2(x):
self.replace_all_btn.focus()
return "break"
tabstop_top.bind('<FocusIn>', tab_fix2)
def _destroy_widgets(self):
    """Tear down the find-bar widgets, if they have been built."""
    if not self.widgets_built:
        return
    # Destroying the container frame destroys every child widget with it.
    self.bar_frame.destroy()
def show_findbar_event(self, event):
    """Open the find/replace bar, seeded from the current selection.

    Raises the find-related tags so highlighting stays visible, copies the
    selection (if any) into the search entry, shows the bar, and moves
    keyboard focus into the entry.  Returns "break" to stop further event
    processing.
    """
    # Make sure the find highlight tags render above the plain selection.
    self.text.tag_raise('findmark')
    self.text.tag_raise('findsel')
    self.text.tag_raise('sel')
    # Get the current selection
    sel = self._get_selection()
    if sel:
        # Put the current selection in the "Find:" entry
        # FIXME: don't overwrite regexp if it matches the selection
        self.find_var.set(self.text.get(sel[0],sel[1]))
        self._clear_selection()
    # Now show the FindBar in all its glory!
    self._show()
    # Set the focus to the "Find:"/"Replace:" entry
    self.find_ent.focus()
    # Select all of the text in the "Find:"/"Replace:" entry
    self.find_ent.selection_range(0,"end")
    # Hide the findbar if the focus is lost
    #self.bar_frame.bind("<FocusOut>", self.hide_findbar_event)
    # RDS - 2012-02-02 - Don't hide on focus_out, since regex error messages
    # trigger this.
    # Focus traversal (Tab or Shift-Tab) shouldn't return focus to
    # the text widget; remember the old takefocus value so it can be restored.
    self.prev_text_takefocus_value = self.text.cget("takefocus")
    self.text.config(takefocus=0)
    # Highlight matches of whatever pattern is already in the entry.
    self._incremental_callback()
    return "break"
def text_focusin_event(self, event=None):  # RDS - 2012-02-02
    """Hide the find bar when keyboard focus returns to the text widget."""
    if self.shown:
        # Keep the insert cursor where the user clicked.
        self.hide_findbar_event(setcursor=False)
def hide_findbar_event(self, event=None, setcursor=True, focus=True):
    """Dismiss the find bar; optionally restore focus to the text widget."""
    if self.shown:
        self._hide(setcursor=setcursor)
        if focus:
            self.text.focus()
    # Always stop further event processing.
    return "break"
def search_again_event(self, event):
    """Repeat the last search, or open the find bar if there is no pattern."""
    has_pattern = self.engine.getpat()
    if not has_pattern:
        # Nothing to repeat; let the user type a pattern first.
        return self.show_findbar_event(event)
    return self._search(event)
def search_selection_event(self, event):
    """Search for the currently selected text."""
    selection = self._get_selection()
    if not selection:
        # Nothing selected: audible warning, swallow the event.
        self.text.bell()
        return "break"
    first, last = selection
    # Seed the search pattern with the selected text, then search.
    self.find_var.set(self.text.get(first, last))
    return self._search(event)
def _toggle_reg_event(self, event):
    """Keyboard shortcut: toggle the 'regular expression' checkbox."""
    # invoke() flips the checkbox exactly as a mouse click would.
    self.reg_btn.invoke()
    return "break"
def _toggle_case_event(self, event):
    """Keyboard shortcut: toggle the 'match case' checkbox."""
    # invoke() flips the checkbox exactly as a mouse click would.
    self.case_btn.invoke()
    return "break"
def _toggle_wrap_event(self, event):
    """Keyboard shortcut: toggle the 'wrap around' checkbox."""
    # invoke() flips the checkbox exactly as a mouse click would.
    self.wrap_btn.invoke()
    return "break"
def _toggle_direction_event(self, event):
    """Keyboard shortcut: flip the search direction (up/down)."""
    # invoke() toggles the direction button exactly as a mouse click would.
    self.direction_btn.invoke()
    return "break"
def _update_direction_button(self):
    """Sync the direction button's label with the engine's direction flag."""
    label = "Up" if self.engine.backvar.get() else "Down"
    self.direction_txt_var.set(label)
def _replace_bar_find_entry_return_event(self, event=None):
    """<Return> in the 'Replace:' entry: jump to the 'With:' entry."""
    # Move focus to the replacement entry and pre-select its contents so
    # typing immediately overwrites the previous replacement text.
    self.replace_ent.focus()
    self.replace_ent.selection_range(0, "end")
    return "break"
def _search_text(self, start, is_safe):
    """Search for the current pattern; return the matching (first, last) range.

    start: text index to search from, or None to derive it from the
        selection / insert cursor.
    is_safe: True when the text is known to be unchanged since the last
        search, letting the window engine reuse its precomputed matches.
    Returns None when the pattern fails to compile; the engine reports
    the error itself.
    """
    self.engine.patvar.set(self.find_var.get())
    regexp = self.engine.getprog()
    if not regexp:
        # an error occurred.
        return None
    direction = not self.engine.isback()
    wrap = self.engine.iswrap()
    sel = self._get_selection()
    if start is None:
        if sel:
            start = sel[0]
        else:
            start = self.text.index("insert")
    # When searching forward from a selection that already matches the
    # pattern, step one character ahead so the *next* match is found
    # instead of the same one again.
    if ( direction and sel and start == sel[0] and
            regexp.match(self.text.get(sel[0],sel[1])) ):
        _start = start + "+1c"
    else:
        _start = start
    res = self.window_engine.findnext(regexp,
                                      _start, direction, wrap, is_safe)
    # ring the bell if the selection was found again
    if sel and start == sel[0] and res == sel:
        self.text.bell()
    return res
def _search(self, event=None, start=None, is_safe=False):
    """Find the next match and select it; beep when nothing is found.

    event: the triggering Tk event (unused; present for binding compat).
    start: index to search from (None = derive from selection / cursor).
    is_safe: forwarded to the window engine; True means the text is
        unchanged since the previous search.
    Always returns "break" to stop further event processing.
    NOTE: removed an unused `t = time.time()` local left over from timing.
    """
    res = self._search_text(start, is_safe)
    if res:
        first, last = res
        self._clear_selection()
        self._set_selection(first, last)
        self.text.see(first)
        if not self.shown:
            # Bar hidden: also move the insert cursor to the match.
            self.text.mark_set("insert", first)
    else:
        self._clear_selection()
        self.text.bell()
    return "break"
def _safe_search(self, event=None, start=None):
    """Like _search(), but promises the engine the text is unchanged."""
    return self._search(event, start, True)
def _replace_event(self, event=None):
    """Replace the current match, if the selection matches the pattern.

    The edit is wrapped in a single undo block; afterwards the inserted
    text is re-selected and the insert cursor placed just past it.
    Always returns "break".
    """
    self.engine.patvar.set(self.find_var.get())
    regexp = self.engine.getprog()
    if not regexp:
        # Invalid pattern; the engine already reported the error.
        return "break"
    # Replace if appropriate
    sel = self._get_selection()
    if sel and regexp.match(self.text.get(sel[0], sel[1])):
        replace_with = self.replace_with_var.get()
        self.text.undo_block_start()
        if sel[0] != sel[1]:
            self.text.delete(sel[0], sel[1])
        if replace_with:
            self.text.insert(sel[0], replace_with)
        self.text.undo_block_stop()
        self._clear_selection()
        # Select the inserted text and move the cursor past it.
        self._set_selection(sel[0], sel[0] + '+%ic' % len(replace_with))
        self.text.mark_set("insert", sel[0] + '+%ic' % len(replace_with))
    return "break"
def _replace_find_event(self, event=None):  # RDS - 2011-10-18
    """Replace the current match, then immediately search for the next one."""
    self._replace_event(event)
    # The buffer just changed, so the search must not assume cached marks.
    return self._search(event, is_safe=False)
def _replace_all_event(self, event=None):
    """Replace every match of the current pattern in the whole text.

    Always returns "break".
    NOTE: removed unused `direction`/`wrap` locals -- replace_all() takes
    only the regexp and the replacement string, and always scans forward
    over the whole buffer without wrapping.
    """
    self.engine.patvar.set(self.find_var.get())
    regexp = self.engine.getprog()
    if not regexp:
        # Invalid pattern; the engine already reported the error.
        return "break"
    self.window_engine.replace_all(regexp, self.replace_with_var.get())
    return "break"
### Selection related methods

def _clear_selection(self):
    """Remove the active selection tag ('findsel' while shown, else 'sel')."""
    tagname = "findsel" if self.shown else "sel"
    self.text.tag_remove(tagname, "1.0", "end")
def _set_selection(self, start, end):
    """Select the given range using the currently active selection tag."""
    self._clear_selection()
    tagname = "findsel" if self.shown else "sel"
    self.text.tag_add(tagname, start, end)
def _get_selection(self):
    """Return the active selection range, or an empty tuple when none."""
    tagname = "findsel" if self.shown else "sel"
    return self.text.tag_nextrange(tagname, '1.0', 'end')
def _reset_selection(self):
    """Mirror the real 'sel' selection onto the bar's own selection tag."""
    if not self.shown:
        return
    real_sel = self.text.tag_nextrange("sel", '1.0', 'end')
    if real_sel:
        self._set_selection(real_sel[0], real_sel[1])
    else:
        self._clear_selection()
class EntryExpander(object):
    """Expand words in an entry, taking possible words from a text widget."""

    def __init__(self, entry, text):
        self.text = text
        self.entry = entry
        self.reset()
        # Restart the expansion state whenever the entry is (re)mapped.
        self.entry.bind('<Map>', self.reset)

    def reset(self, event=None):
        """Forget any in-progress expansion cycle."""
        self._state = None

    def bind(self, event_string):
        """Bind *event_string* on the entry to the word-expansion handler."""
        self.entry.bind(event_string, self._expand_word_event)

    def _expand_word_event(self, event=None):
        """Replace the word before the cursor with the next candidate.

        Repeated invocations cycle through the candidates; the cycle state
        is discarded as soon as the entry is edited by other means.
        """
        curinsert = self.entry.index("insert")
        curline = self.entry.get()
        if not self._state:
            words = self._get_expand_words()
            index = 0
        else:
            words, index, insert, line = self._state
            if insert != curinsert or line != curline:
                # The entry changed since the last expansion; start over.
                words = self._get_expand_words()
                index = 0
        if not words:
            self.text.bell()
            return "break"
        curword = self._get_curr_word()
        newword = words[index]
        index = (index + 1) % len(words)
        if index == 0:
            self.text.bell()  # Warn the user that we cycled around
        idx = int(self.entry.index("insert"))
        self.entry.delete(str(idx - len(curword)), str(idx))
        self.entry.insert("insert", newword)
        curinsert = self.entry.index("insert")
        curline = self.entry.get()
        self._state = words, index, curinsert, curline
        return "break"

    def _get_expand_words(self):
        """Return candidate expansions for the word before the cursor.

        Words before and after the text widget's insert point are
        interleaved so nearer words tend to come first; the current word
        itself is appended last so the cycle ends where it started.
        """
        curword = self._get_curr_word()
        if not curword:
            return []
        regexp = re.compile(r"\b" + curword + r"\w+\b")
        # Start at 'insert wordend' so current word is first
        beforewords = regexp.findall(self.text.get("1.0", "insert wordend"))
        beforewords.reverse()
        afterwords = regexp.findall(self.text.get("insert wordend", "end"))
        # Interleave the lists of words
        # (This is the next best thing to sorting by distance)
        allwords = []
        for a, b in zip(beforewords, afterwords):
            allwords += [a, b]
        # BUG FIX: use floor division -- plain '/' produces a float on
        # Python 3 and float slice indices raise TypeError.
        minlen = len(allwords) // 2
        allwords += beforewords[minlen:] + afterwords[minlen:]
        # De-duplicate while preserving order.
        words_list = []
        words_dict = {}
        for w in allwords:
            if w not in words_dict:
                words_dict[w] = w
                words_list.append(w)
        words_list.append(curword)
        return words_list

    # Characters considered part of a word when scanning backwards.
    _wordchars = string.ascii_letters + string.digits + "_"

    def _get_curr_word(self):
        """Return the run of word characters immediately before the cursor."""
        line = self.entry.get()
        i = j = self.entry.index("insert")
        while i > 0 and line[i - 1] in self._wordchars:
            i = i - 1
        return line[i:j]
def get_window_engine(editwin):
    """Return the editor window's search engine, creating it on first use."""
    try:
        return editwin._window_search_engine
    except AttributeError:
        # Lazily attach one engine per editor window.
        engine = WindowSearchEngine(editwin.text)
        editwin._window_search_engine = engine
        return engine
class WindowSearchEngine:
    """Maintain 'findmark' tags on every regexp match in a Text widget.

    Pre-marking all matches turns repeated find-next / find-prev
    operations into cheap tag-range lookups instead of regexp scans.
    """
    def __init__(self, text):
        self.text = text
        # Initialize 'findmark' tag
        self.hide_find_marks()
        self.reset()
    def __del__(self):
        # Drop the tag so nothing lingers on the shared Text widget.
        self.text.tag_delete("findmark")
    def show_find_marks(self):
        """Color all match marks using the current theme's 'hit' highlight."""
        # Get the highlight colors for 'hit'
        # Do this here (and not in __init__) for color config changes to take
        # effect immediately
        currentTheme = idleConf.CurrentTheme()
        mark_fg = idleConf.GetHighlight(currentTheme, 'hit', fgBg='fg')
        mark_bg = idleConf.GetHighlight(currentTheme, 'hit', fgBg='bg')
        self.text.tag_configure("findmark",
                                foreground=mark_fg,
                                background=mark_bg)
    def hide_find_marks(self):
        """Make the match marks invisible (empty colors keep the tag ranges)."""
        self.text.tag_configure("findmark",
                                foreground='',
                                background='')
    def reset(self):
        """Drop all match marks and forget the current regexp."""
        self.text.tag_remove("findmark", "1.0", "end")
        self.regexp = None
    def _pos2idx(self, pos):
        "Convert a position in the text string to a Text widget index"
        return self.text.index("1.0+%dc"%pos)
    def _set_regexp(self, regexp):
        "Set the current regexp; search for and mark all matches in the text"
        ## When searching for an extension of the previous search,
        ## i.e. regexp.startswith(self.regexp), update hits instead of starting from
        ## scratch
        self.reset()
        self.regexp = regexp
        txt = self.text.get("1.0", "end-1c")
        prev = 0
        line = 1
        # Hoist lookups out of the per-match loop.
        rfind = txt.rfind
        tag_add = self.text.tag_add
        for res in regexp.finditer(txt):
            start, end = res.span()
            # Track the line number incrementally instead of recounting
            # newlines from the start of the buffer for every match.
            line += txt[prev:start].count('\n')
            prev = start
            # Column = offset from the last newline before the match.
            start_idx = "%d.%d" % (line,
                                   start - (rfind('\n', 0, start) + 1))
            end_idx = start_idx + '+%dc'%(end-start)
            tag_add("findmark", start_idx, end_idx)
    def findnext(self, regexp, start, direction=1, wrap=True, is_safe=False,
                 last=False):
        """Find the next text sequence which matches the given regexp.
        The 'next' sequence is the one after the selection or the insert
        cursor, or before if the direction is up instead of down.
        The 'is_safe' argument tells whether it is safe to assume that the text
        being searched has not been changed since the previous search; if the
        text hasn't been changed then the search is almost trivial (due to
        pre-processing).
        Returns the (first, last) range of the match, or an empty result
        when there is none.
        """
        if regexp != self.regexp or not is_safe:
            # Pattern changed or text may have changed: re-mark everything.
            self._set_regexp(regexp)
        # Search!
        if direction:
            next = self.text.tag_nextrange("findmark", start)
            if not next:
                if wrap:
                    # TODO: give message about wrap
                    next = self.text.tag_nextrange("findmark", '1.0', start)
                else:
                    # TODO: no more matches message
                    pass
        else:
            next = self.text.tag_prevrange("findmark", start)
            if not next:
                if wrap:
                    # TODO: give message about wrap
                    next = self.text.tag_prevrange("findmark", 'end', start)
                else:
                    # TODO: no more matches message
                    pass
        if not last and not next:
            # Retry once from one character away, in case 'start' sits
            # exactly on a mark boundary.
            if direction==1:
                delta='-1c'
            else:
                delta='+1c'
            q1 = self.text.index(start+delta)
            next = self.findnext(regexp, q1, direction=direction,
                                 wrap=wrap, is_safe=is_safe, last=True)
        # the "last=True" flag is to prevent infinite recursion if something
        # should go wrong with tag_nextrange or prevrange.
        return next
    def replace_all(self, regexp, replace_with):
        """Replace every match of regexp with replace_with (forward, no wrap)."""
        oldhit = None
        searchfrom = '1.0'
        # Group the whole operation into a single undo unit.
        self.text.undo_block_start()
        while True:
            hit = self.findnext(regexp, searchfrom,
                                direction=1, wrap=False, is_safe=False)
            if not hit or hit == oldhit:
                break
            oldhit = hit # avoid infinite loop due to ModifiedUndoDelegator in PyShell
            first, last = hit
            if first != last:
                self.text.delete(first, last)
            if replace_with:
                self.text.insert(first, replace_with)
            searchfrom = last
        self.text.undo_block_stop()
def get_selection(text):
    """Return the (first, last) selection range in a text widget.

    Falls back to a zero-width range at the insert cursor when there is
    no active selection.
    """
    sel_range = text.tag_nextrange("sel", "1.0", "end")
    if not sel_range:
        cursor = text.index("insert")
        return cursor, cursor
    return sel_range[0], sel_range[1]
##def idx2ints(idx):
## "Convert a Text widget index to a (line, col) pair"
## line, col = map(int,idx.split(".")) # Fails on invalid index
## return line, col
##def ints2idx(ints):
## "Convert a (line, col) pair to Tk's Text widget's format."
## return "%d.%d" % ints # Fails on invalid index
| gpl-2.0 |
repotvsupertuga/repo | plugin.video.playlistLoader/resources/lib/chardet/jisfreq.py | 3131 | 47315 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ration = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
# Char to FreqOrder table ,
# NOTE(review): presumably the number of meaningful frequency ranks in
# JISCharToFreqOrder (characters mapped beyond it are treated as rare) --
# confirm against the distribution analyser that consumes these constants.
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
# Everything below is of no interest for detection purposes
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
| gpl-2.0 |
marratj/ansible | lib/ansible/modules/cloud/amazon/ec2_tag.py | 25 | 5559 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: ec2_tag
short_description: create and remove tag(s) to ec2 resources.
description:
- Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX).
It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto.
version_added: "1.3"
options:
resource:
description:
- The EC2 resource id.
required: true
default: null
aliases: []
state:
description:
- Whether the tags should be present or absent on the resource. Use list to interrogate the tags of an instance.
required: false
default: present
choices: ['present', 'absent', 'list']
aliases: []
tags:
description:
- a hash/dictionary of tags to add to the resource; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: true
default: null
aliases: []
author: "Lester Wade (@lwade)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Ensure tags are present on a resource
ec2_tag:
region: eu-west-1
resource: vol-XXXXXX
state: present
tags:
Name: ubervol
env: prod
- name: Ensure one dbserver is running
ec2:
count_tag:
Name: dbserver
Env: production
exact_count: 1
group: '{{ security_group }}'
keypair: '{{ keypair }}'
image: '{{ image_id }}'
instance_tags:
Name: dbserver
Env: production
instance_type: '{{ instance_type }}'
region: eu-west-1
volumes:
- device_name: /dev/xvdb
device_type: standard
volume_size: 10
delete_on_termination: True
wait: True
register: ec2
- name: Retrieve all volumes for a queried instance
ec2_vol:
instance: '{{ item.id }}'
region: eu-west-1
state: list
with_items: '{{ ec2.tagged_instances }}'
register: ec2_vol
- name: Ensure all volumes are tagged
ec2_tag:
region: eu-west-1
resource: '{{ item.id }}'
state: present
tags:
Name: dbserver
Env: production
with_items:
- ec2_vol.volumes
- name: Get EC2 facts
action: ec2_facts
- name: Retrieve all tags on an instance
ec2_tag:
region: '{{ ansible_ec2_placement_region }}'
resource: '{{ ansible_ec2_instance_id }}'
state: list
register: ec2_tags
- name: List tags, such as Name and env
debug:
msg: '{{ ec2_tags.tags.Name }} {{ ec2_tags.tags.env }}'
'''
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
def main():
    """Ansible module entry point: create, remove, or list EC2 resource tags.

    Reads ``resource`` (an EC2 resource id such as i-XXXXXXX or vol-XXXXXX),
    ``tags`` (a dict of key/value pairs), and ``state`` ('present', 'absent',
    or 'list') from the module arguments. The requested tags are diffed
    against the tags currently on the resource so that ``changed`` is
    reported accurately and the EC2 API is only called when something
    actually needs to change. All paths terminate via
    ``module.exit_json()`` / ``module.fail_json()``.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        resource=dict(required=True),
        tags=dict(type='dict'),
        state=dict(default='present', choices=['present', 'absent', 'list']),
    ))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    resource = module.params.get('resource')
    tags = module.params.get('tags')
    state = module.params.get('state')

    ec2 = ec2_connect(module)

    # Fetch the tags currently on the resource so we can diff against the
    # requested tags and tag/untag only what is needed.
    gettags = ec2.get_all_tags(filters={'resource-id': resource})
    tagdict = {tag.name: tag.value for tag in gettags}
    # Hoisted once: the (key, value) pairs currently on the resource.
    # (Previously this set was rebuilt inside every loop iteration.)
    existing = set(tagdict.items())

    if state == 'present':
        if not tags:
            module.fail_json(msg="tags argument is required when state is present")
        if set(tags.items()).issubset(existing):
            module.exit_json(msg="Tags already exists in %s." % resource, changed=False)
        # Only the pairs that are missing or carry a different value.
        dictadd = {key: value for key, value in tags.items() if (key, value) not in existing}
        if not module.check_mode:
            ec2.create_tags(resource, dictadd)
        module.exit_json(msg="Tags %s created for resource %s." % (dictadd, resource), changed=True)

    if state == 'absent':
        if not tags:
            module.fail_json(msg="tags argument is required when state is absent")
        # Pairs the caller asked to remove that are not on the resource.
        baddict = {key: value for key, value in tags.items() if (key, value) not in existing}
        if set(baddict) == set(tags):
            # Every requested key is absent (or holds a different value):
            # nothing to delete.
            module.exit_json(msg="Nothing to remove here. Move along.", changed=False)
        dictremove = {key: value for key, value in tags.items() if (key, value) in existing}
        if not module.check_mode:
            ec2.delete_tags(resource, dictremove)
        module.exit_json(msg="Tags %s removed for resource %s." % (dictremove, resource), changed=True)

    if state == 'list':
        module.exit_json(changed=False, tags=tagdict)
if __name__ == '__main__':
main()
| gpl-3.0 |
markflorisson/minivect | minivect/minicode.py | 1 | 5303 | """
Code writers and formatters. Subclass CodeWriter to suit the needs of
a certain code generator backend.
"""
try:
from Cython.Compiler import Tempita as tempita
except ImportError:
try:
import tempita
except ImportError:
tempita = None
class CodeWriter(object):
    """
    Accumulate code as a sequence of objects for later assembly.

    .. attribute:: loop_levels

        CodeWriter objects captured just before the start of each loop

    .. attribute:: tiled_loop_levels

        like ``loop_levels``, but aware of tiled loop patterns

    .. attribute:: declaration_levels

        like ``loop_levels``, but valid insertion points for C89 declarations
    """

    error_handler = None

    def __init__(self, context, buffer=None):
        # A falsy buffer means "allocate a fresh tree".
        self.buffer = buffer if buffer else _CodeTree()
        self.context = context
        self.loop_levels = []
        self.tiled_loop_levels = []
        self.declaration_levels = []

    @classmethod
    def clone(cls, other, context, buffer):
        """Build a new writer of the same class (``other`` is unused here)."""
        return cls(context, buffer)

    def insertion_point(self):
        """
        Create an insertion point: anything written to the returned writer
        later on appears in the output at the point where this method was
        called.
        """
        child = self.clone(self, self.context, self.buffer.insertion_point())
        child.loop_levels = self.loop_levels[:]
        child.tiled_loop_levels = self.tiled_loop_levels[:]
        child.declaration_levels = self.declaration_levels[:]
        return child

    def write(self, value):
        """Append a single code object to the underlying buffer."""
        self.buffer.output.append(value)

    def put_label(self, label):
        "Insert a label in the code"
        self.write(label)

    def put_goto(self, label):
        "Jump to a label. Implement in subclasses"
class CCodeWriter(CodeWriter):
    """
    Code writer for C output.

    Maintains two buffers: this writer's own buffer holds the function
    implementations, while ``proto_code`` (a second CCodeWriter) collects
    the C prototypes.
    """

    def __init__(self, context, buffer=None, proto_code=None):
        super(CCodeWriter, self).__init__(context, buffer)
        if proto_code is None:
            # Only the primary writer owns a prototype writer.  The nested
            # writer is created with proto_code=False and therefore never
            # gets a .proto_code attribute of its own.
            self.proto_code = type(self)(context, proto_code=False)
        self.indent = 0

    def put_label(self, label):
        "Insert a C label"
        self.putln('%s:' % self.mangle(label.name))

    def put_goto(self, label):
        "Jump to (goto) a label"
        self.putln("goto %s;" % self.mangle(label.name))

    def putln(self, code):
        "Write a code string as a line. Also performs indentation"
        # Closing braces dedent before the line is emitted; opening braces
        # indent the lines that follow.
        self.indent -= code.count('}')
        self.write("%s%s\n" % (self.indent * ' ', code))
        self.indent += code.count('{')

    def mangle(self, symbol):
        "Mangle symbol names"
        return "__mini_mangle_%s" % symbol

    @classmethod
    def clone(cls, other, context, buffer):
        new_writer = super(CCodeWriter, cls).clone(other, context, buffer)
        new_writer.indent = other.indent
        return new_writer
def sub_tempita(s, context, file=None, name=None):
    """Run the tempita template engine on the given string.

    ``context`` supplies the substitution variables; ``file`` and ``name``
    are only used to build a ``__name`` entry for error reporting.
    Returns None for an empty/None template string.
    """
    if not s:
        return None

    qualifier = '%s:%s' % (file, name) if file else name
    if qualifier:
        context['__name'] = qualifier

    if tempita is None:
        raise RuntimeError("Tempita was not installed")

    return tempita.sub(s, **context)
class TempitaCodeWriter(CodeWriter):
    """
    Code writer which supports writing Tempita strings. See
    http://pythonpaste.org/tempita/ for documentation on Tempita.
    """

    def putln(self, string, context_dict):
        """Render *string* through Tempita with *context_dict* and write it
        followed by a newline."""
        # Bug fix: context_dict was previously not forwarded, so
        # sub_tempita() was always called with its required `context`
        # argument missing (TypeError on every call).
        self.write(sub_tempita(string, context_dict) + '\n')
class CodeFormatter(object):
    """
    Default formatter: hand back the raw list of objects that were written
    to the :py:class:`minivect.codegen.CodeWriter`.
    """

    def format(self, codewriter):
        """Return the list of code objects accumulated in *codewriter*."""
        return codewriter.buffer.getvalue()
class CodeStringFormatter(CodeFormatter):
    "Format code as a single concatenated string"

    def format(self, codewriter):
        """Join all written fragments into one string."""
        fragments = codewriter.buffer.getvalue()
        return "".join(fragments)
class CCodeStringFormatter(CodeStringFormatter):
    "Format both the C prototype buffer and the implementation buffer"

    def format(self, codewriter):
        """Return a ``(prototypes, implementation)`` pair of joined strings."""
        prototypes = "".join(codewriter.proto_code.buffer.getvalue())
        implementation = "".join(codewriter.buffer.getvalue())
        return (prototypes, implementation)
class _CodeTree(object):
"""
See Cython/StringIOTree
"""
def __init__(self, output=None, condition=None):
self.prepended_children = []
self.output = output or []
def _getvalue(self, result):
for child in self.prepended_children:
child._getvalue(result)
result.extend(self.output)
def getvalue(self):
result = []
self._getvalue(result)
return result
def clone(self, output=None):
return type(self)(output)
def commit(self):
if self.output:
self.prepended_children.append(self.clone(self.output))
self.output = []
def insertion_point(self):
self.commit()
ip = self.clone()
self.prepended_children.append(ip)
return ip | bsd-2-clause |
normanmaurer/autobahntestsuite-maven-plugin | src/main/resources/twisted/test/stdio_test_producer.py | 40 | 1497 | # -*- test-case-name: twisted.test.test_stdio.StandardInputOutputTestCase.test_producer -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Main program for the child process run by
L{twisted.test.test_stdio.StandardInputOutputTestCase.test_producer} to test
that process transports implement IProducer properly.
"""
import sys, _preamble
from twisted.internet import stdio, protocol
from twisted.python import log, reflect
class ProducerChild(protocol.Protocol):
    """Child-process protocol that echoes stdin back to stdout while
    repeatedly pausing/resuming its transport, to exercise IProducer.

    Receiving data while paused is an error and is logged before the
    connection is dropped.
    """

    _paused = False
    buf = ''

    def connectionLost(self, reason):
        log.msg("*****OVER*****")
        # Give the parent a moment to read remaining output before exiting.
        reactor.callLater(1, reactor.stop)

    def dataReceived(self, bytes):
        self.buf += bytes
        if self._paused:
            # Data must never arrive while the transport is paused.
            log.startLogging(sys.stderr)
            log.msg("dataReceived while transport paused!")
            self.transport.loseConnection()
            return
        self.transport.write(bytes)
        if self.buf.endswith('\n0\n'):
            # Sentinel received: we're done echoing.
            self.transport.loseConnection()
        else:
            self.pause()

    def pause(self):
        self._paused = True
        self.transport.pauseProducing()
        reactor.callLater(0.01, self.unpause)

    def unpause(self):
        self._paused = False
        self.transport.resumeProducing()
if __name__ == '__main__':
    # The reactor implementation to use is named in argv[1]; resolve it
    # via twisted.python.reflect and install it before importing
    # twisted.internet.reactor (presumably so the chosen reactor, not a
    # default one, ends up installed -- confirm against the test driver).
    reflect.namedAny(sys.argv[1]).install()
    from twisted.internet import reactor
    # Wire this process's stdin/stdout to the ProducerChild protocol.
    stdio.StandardIO(ProducerChild())
    reactor.run()
| apache-2.0 |
MrTheodor/espressopp | src/FixedQuadrupleList.py | 7 | 3188 | # Copyright (C) 2012,2013,2015,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*****************************
espressopp.FixedQuadrupleList
*****************************
.. function:: espressopp.FixedQuadrupleList(storage)
:param storage:
:type storage:
.. function:: espressopp.FixedQuadrupleList.add(pid1, pid2, pid3, pid4)
:param pid1:
:param pid2:
:param pid3:
:param pid4:
:type pid1:
:type pid2:
:type pid3:
:type pid4:
:rtype:
.. function:: espressopp.FixedQuadrupleList.addQuadruples(quadruplelist)
:param quadruplelist:
:type quadruplelist:
:rtype:
.. function:: espressopp.FixedQuadrupleList.remove()
remove the FixedPairList and disconnect
.. function:: espressopp.FixedQuadrupleList.getQuadruples()
:rtype:
.. function:: espressopp.FixedQuadrupleList.size()
:rtype:
"""
from espressopp import pmi
import _espressopp
import espressopp
from espressopp.esutil import cxxinit
class FixedQuadrupleListLocal(_espressopp.FixedQuadrupleList):
    """Worker-side wrapper around the C++ FixedQuadrupleList.

    Every operation is guarded by ``pmi.workerIsActive()`` so that it only
    executes on active PMI workers.
    """

    def __init__(self, storage):
        if not pmi.workerIsActive():
            return
        cxxinit(self, _espressopp.FixedQuadrupleList, storage)

    def add(self, pid1, pid2, pid3, pid4):
        """Add one quadruple of particle ids to the list."""
        if not pmi.workerIsActive():
            return None
        return self.cxxclass.add(self, pid1, pid2, pid3, pid4)

    def size(self):
        """Return the size reported by the C++ list."""
        if not pmi.workerIsActive():
            return None
        return self.cxxclass.size(self)

    def addQuadruples(self, quadruplelist):
        """
        Each processor takes the broadcasted quadruplelist and
        adds those quadruples whose first particle is owned by
        this processor.
        """
        if not pmi.workerIsActive():
            return
        for pid1, pid2, pid3, pid4 in quadruplelist:
            self.cxxclass.add(self, pid1, pid2, pid3, pid4)

    def remove(self):
        """Remove the fixed quadruple list and disconnect."""
        if not pmi.workerIsActive():
            return
        self.cxxclass.remove(self)

    def getQuadruples(self):
        """Return the quadruples held by the C++ list."""
        if not pmi.workerIsActive():
            return None
        return self.cxxclass.getQuadruples(self)
if pmi.isController:
    # Controller-side proxy: forwards the calls listed below to the
    # FixedQuadrupleListLocal instances living on the PMI workers.
    class FixedQuadrupleList(object):
        __metaclass__ = pmi.Proxy
        # NOTE(review): mapping assumed to follow the pmi.Proxy convention --
        # localcall: executed on the calling process only; pmicall:
        # broadcast without collecting results; pmiinvoke: broadcast and
        # gather return values.  Confirm against the pmi documentation.
        pmiproxydefs = dict(
            cls = 'espressopp.FixedQuadrupleListLocal',
            localcall = [ "add" ],
            pmicall = [ "addQuadruples","remove" ],
            pmiinvoke = ["getQuadruples", "size"]
            )
| gpl-3.0 |
hfp/tensorflow-xsmm | tensorflow/contrib/tensor_forest/hybrid/python/__init__.py | 183 | 1053 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initialize tensor_forest/hybrid/python."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import layers
from tensorflow.contrib.tensor_forest.hybrid.python import models
from tensorflow.contrib.tensor_forest.hybrid.python.ops import training_ops
| apache-2.0 |
PierreFaniel/openerp-7.0 | mail_organizer/__init__.py | 3 | 1091 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2014 Elico Corp. All Rights Reserved.
# Augustin Cisterne-Kaas <augustin.cisterne-kaas@elico-corp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import model
import message
import wizard
| agpl-3.0 |
grpc/grpc | src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py | 16 | 8869 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests server responding with RESOURCE_EXHAUSTED."""
import threading
import unittest
import logging
import grpc
from grpc import _channel
from grpc.framework.foundation import logging_pool
from tests.unit import test_common
from tests.unit.framework.common import test_constants
# Fixed three-byte payloads used for every request and response.
_REQUEST = b'\x00\x00\x00'
_RESPONSE = b'\x00\x00\x00'
# Fully-qualified method names for each of the four RPC cardinalities.
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
class _TestTrigger(object):
def __init__(self, total_call_count):
self._total_call_count = total_call_count
self._pending_calls = 0
self._triggered = False
self._finish_condition = threading.Condition()
self._start_condition = threading.Condition()
# Wait for all calls be blocked in their handler
def await_calls(self):
with self._start_condition:
while self._pending_calls < self._total_call_count:
self._start_condition.wait()
# Block in a response handler and wait for a trigger
def await_trigger(self):
with self._start_condition:
self._pending_calls += 1
self._start_condition.notify()
with self._finish_condition:
if not self._triggered:
self._finish_condition.wait()
# Finish all response handlers
def trigger(self):
with self._finish_condition:
self._triggered = True
self._finish_condition.notify_all()
def handle_unary_unary(trigger, request, servicer_context):
    """Unary-unary handler: park in the trigger, then reply once."""
    trigger.await_trigger()
    return _RESPONSE
def handle_unary_stream(trigger, request, servicer_context):
    """Unary-stream handler: park in the trigger, then stream a fixed
    number of responses."""
    trigger.await_trigger()
    for _unused_index in range(test_constants.STREAM_LENGTH):
        yield _RESPONSE
def handle_stream_unary(trigger, request_iterator, servicer_context):
    """Stream-unary handler: park in the trigger, drain the request
    stream, then reply once."""
    trigger.await_trigger()

    # TODO(issue:#6891) We should be able to remove this loop
    for _unused_request in request_iterator:
        pass

    return _RESPONSE
def handle_stream_stream(trigger, request_iterator, servicer_context):
    """Stream-stream handler: park in the trigger, then echo one response
    per incoming request."""
    trigger.await_trigger()

    # TODO(issue:#6891) We should be able to remove this loop,
    # and replace with return; yield
    for _unused_request in request_iterator:
        yield _RESPONSE
class _MethodHandler(grpc.RpcMethodHandler):
    """Method handler that routes to exactly one of the trigger-aware
    module-level handler functions, based on the streaming flags."""

    def __init__(self, trigger, request_streaming, response_streaming):
        self.request_streaming = request_streaming
        self.response_streaming = response_streaming
        self.request_deserializer = None
        self.response_serializer = None
        # Exactly one of the four callables below is populated.
        self.unary_unary = None
        self.unary_stream = None
        self.stream_unary = None
        self.stream_stream = None
        if request_streaming and response_streaming:
            self.stream_stream = (
                lambda req, ctx: handle_stream_stream(trigger, req, ctx))
        elif request_streaming:
            self.stream_unary = (
                lambda req, ctx: handle_stream_unary(trigger, req, ctx))
        elif response_streaming:
            self.unary_stream = (
                lambda req, ctx: handle_unary_stream(trigger, req, ctx))
        else:
            self.unary_unary = (
                lambda req, ctx: handle_unary_unary(trigger, req, ctx))
class _GenericHandler(grpc.GenericRpcHandler):
    """Generic handler mapping each test method name to a _MethodHandler
    with the matching (request_streaming, response_streaming) flags."""

    def __init__(self, trigger):
        self._trigger = trigger

    def service(self, handler_call_details):
        streaming_flags = {
            _UNARY_UNARY: (False, False),
            _UNARY_STREAM: (False, True),
            _STREAM_UNARY: (True, False),
            _STREAM_STREAM: (True, True),
        }.get(handler_call_details.method)
        if streaming_flags is None:
            # Unknown method: decline to service the RPC.
            return None
        return _MethodHandler(self._trigger, *streaming_flags)
class ResourceExhaustedTest(unittest.TestCase):
    """Exercises a server saturated to maximum_concurrent_rpcs.

    Each test fills the server with handlers blocked in the trigger,
    verifies that one additional RPC fails with RESOURCE_EXHAUSTED,
    releases the blocked handlers, and finally checks that a fresh RPC
    succeeds again.
    """

    def setUp(self):
        # Thread pool and trigger sized exactly to the server's
        # concurrent-RPC limit, so every pool thread ends up parked.
        self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        self._trigger = _TestTrigger(test_constants.THREAD_CONCURRENCY)
        self._server = grpc.server(
            self._server_pool,
            handlers=(_GenericHandler(self._trigger),),
            options=(('grpc.so_reuseport', 0),),
            maximum_concurrent_rpcs=test_constants.THREAD_CONCURRENCY)
        # Port 0 lets the OS pick a free port; we dial it back below.
        port = self._server.add_insecure_port('[::]:0')
        self._server.start()
        self._channel = grpc.insecure_channel('localhost:%d' % port)

    def tearDown(self):
        self._server.stop(0)
        self._channel.close()

    def testUnaryUnary(self):
        """Saturate with unary-unary futures; the next call is rejected."""
        multi_callable = self._channel.unary_unary(_UNARY_UNARY)
        futures = []
        for _ in range(test_constants.THREAD_CONCURRENCY):
            futures.append(multi_callable.future(_REQUEST))

        self._trigger.await_calls()

        # Both a blocking call and a future must be rejected while full.
        with self.assertRaises(grpc.RpcError) as exception_context:
            multi_callable(_REQUEST)

        self.assertEqual(grpc.StatusCode.RESOURCE_EXHAUSTED,
                         exception_context.exception.code())

        future_exception = multi_callable.future(_REQUEST)
        self.assertEqual(grpc.StatusCode.RESOURCE_EXHAUSTED,
                         future_exception.exception().code())

        self._trigger.trigger()
        for future in futures:
            self.assertEqual(_RESPONSE, future.result())

        # Ensure a new request can be handled
        self.assertEqual(_RESPONSE, multi_callable(_REQUEST))

    def testUnaryStream(self):
        """Unary-stream variant of the saturation scenario."""
        multi_callable = self._channel.unary_stream(_UNARY_STREAM)
        calls = []
        for _ in range(test_constants.THREAD_CONCURRENCY):
            calls.append(multi_callable(_REQUEST))

        self._trigger.await_calls()

        with self.assertRaises(grpc.RpcError) as exception_context:
            next(multi_callable(_REQUEST))

        self.assertEqual(grpc.StatusCode.RESOURCE_EXHAUSTED,
                         exception_context.exception.code())

        self._trigger.trigger()

        for call in calls:
            for response in call:
                self.assertEqual(_RESPONSE, response)

        # Ensure a new request can be handled
        new_call = multi_callable(_REQUEST)
        for response in new_call:
            self.assertEqual(_RESPONSE, response)

    def testStreamUnary(self):
        """Stream-unary variant of the saturation scenario."""
        multi_callable = self._channel.stream_unary(_STREAM_UNARY)
        futures = []
        # NOTE(review): a single request iterator is shared by all
        # concurrent calls -- presumably intentional, since each handler
        # only needs the stream to terminate. Confirm.
        request = iter([_REQUEST] * test_constants.STREAM_LENGTH)
        for _ in range(test_constants.THREAD_CONCURRENCY):
            futures.append(multi_callable.future(request))

        self._trigger.await_calls()

        with self.assertRaises(grpc.RpcError) as exception_context:
            multi_callable(request)

        self.assertEqual(grpc.StatusCode.RESOURCE_EXHAUSTED,
                         exception_context.exception.code())

        future_exception = multi_callable.future(request)
        self.assertEqual(grpc.StatusCode.RESOURCE_EXHAUSTED,
                         future_exception.exception().code())

        self._trigger.trigger()

        for future in futures:
            self.assertEqual(_RESPONSE, future.result())

        # Ensure a new request can be handled
        self.assertEqual(_RESPONSE, multi_callable(request))

    def testStreamStream(self):
        """Stream-stream variant of the saturation scenario."""
        multi_callable = self._channel.stream_stream(_STREAM_STREAM)
        calls = []
        request = iter([_REQUEST] * test_constants.STREAM_LENGTH)
        for _ in range(test_constants.THREAD_CONCURRENCY):
            calls.append(multi_callable(request))

        self._trigger.await_calls()

        with self.assertRaises(grpc.RpcError) as exception_context:
            next(multi_callable(request))

        self.assertEqual(grpc.StatusCode.RESOURCE_EXHAUSTED,
                         exception_context.exception.code())

        self._trigger.trigger()

        for call in calls:
            for response in call:
                self.assertEqual(_RESPONSE, response)

        # Ensure a new request can be handled
        new_call = multi_callable(request)
        for response in new_call:
            self.assertEqual(_RESPONSE, response)
if __name__ == '__main__':
    # Configure default logging and run the test suite verbosely.
    logging.basicConfig()
    unittest.main(verbosity=2)
| apache-2.0 |
sherazkasi/SabreSoftware | Lib/site-packages/scipy/stats/rv.py | 58 | 1481 |
from numpy import vectorize
from numpy.random import random_sample
__all__ = ['randwppf', 'randwcdf']
# XXX: Are these needed anymore?
#####################################
# General purpose continuous
######################################
def randwppf(ppf, args=(), size=None):
    """Return random deviates from a distribution whose percent point
    function (inverse of the CDF) is given.

    Parameters
    ----------
    ppf : callable
        Percent point function.  Must accept an array of quantiles to
        compute over.
    args : tuple, optional
        Extra arguments to pass to `ppf` (i.e. shape, location, scale).
    size : int or tuple of ints, optional
        Shape of the output.

    Returns
    -------
    Random deviates obtained by evaluating `ppf` at uniform variates.
    """
    U = random_sample(size=size)
    # Bug fix: the built-in apply() is Python-2-only (removed in Python 3);
    # call the function directly instead.
    return ppf(U, *args)
def randwcdf(cdf, mean=1.0, args=(), size=None):
    """Return random deviates from a distribution whose cumulative
    distribution function (CDF) is given.

    The CDF is inverted numerically with a root finder, so `cdf` only
    needs to accept a single value to compute over.

    Parameters
    ----------
    cdf : callable
        Cumulative distribution function.
    mean : float, optional
        Mean of the distribution; used as the starting point for the solver.
    args : tuple, optional
        Extra arguments to pass to `cdf` (i.e. shape, location, scale).
    size : int or tuple of ints, optional
        Shape of the output.

    Returns
    -------
    Random deviates obtained by solving ``cdf(x, *args) == u`` for
    uniformly distributed u.
    """
    import scipy.optimize as optimize

    def _ppfopt(x, q, *nargs):
        # The root of this function in x is the q-quantile.
        newargs = (x,) + nargs
        return cdf(*newargs) - q

    def _ppf(q, *nargs):
        return optimize.fsolve(_ppfopt, mean, args=(q,) + nargs)

    _vppf = vectorize(_ppf)
    U = random_sample(size=size)
    # Bug fix: the built-in apply() is Python-2-only (removed in Python 3);
    # call the vectorized ppf directly instead.
    return _vppf(U, *args)
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.