repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
AsgerPetersen/QGIS
python/ext-libs/future/future/types/newint.py
68
13233
""" Backport of Python 3's int, based on Py2's long. They are very similar. The most notable difference is: - representation: trailing L in Python 2 removed in Python 3 """ from __future__ import division import struct import collections from future.types.newbytes import newbytes from future.types.newobject import newobject from future.utils import PY3, isint, istext, isbytes, with_metaclass, native if PY3: long = int class BaseNewInt(type): def __instancecheck__(cls, instance): if cls == newint: # Special case for Py2 short or long int return isinstance(instance, (int, long)) else: return issubclass(instance.__class__, cls) class newint(with_metaclass(BaseNewInt, long)): """ A backport of the Python 3 int object to Py2 """ def __new__(cls, x=0, base=10): """ From the Py3 int docstring: | int(x=0) -> integer | int(x, base=10) -> integer | | Convert a number or string to an integer, or return 0 if no | arguments are given. If x is a number, return x.__int__(). For | floating point numbers, this truncates towards zero. | | If x is not a number or if base is given, then x must be a string, | bytes, or bytearray instance representing an integer literal in the | given base. The literal can be preceded by '+' or '-' and be | surrounded by whitespace. The base defaults to 10. Valid bases are | 0 and 2-36. Base 0 means to interpret the base from the string as an | integer literal. 
| >>> int('0b100', base=0) | 4 """ try: val = x.__int__() except AttributeError: val = x else: if not isint(val): raise TypeError('__int__ returned non-int ({0})'.format( type(val))) if base != 10: # Explicit base if not (istext(val) or isbytes(val) or isinstance(val, bytearray)): raise TypeError( "int() can't convert non-string with explicit base") try: return super(newint, cls).__new__(cls, val, base) except TypeError: return super(newint, cls).__new__(cls, newbytes(val), base) # After here, base is 10 try: return super(newint, cls).__new__(cls, val) except TypeError: # Py2 long doesn't handle bytearray input with an explicit base, so # handle this here. # Py3: int(bytearray(b'10'), 2) == 2 # Py2: int(bytearray(b'10'), 2) == 2 raises TypeError # Py2: long(bytearray(b'10'), 2) == 2 raises TypeError try: return super(newint, cls).__new__(cls, newbytes(val)) except: raise TypeError("newint argument must be a string or a number," "not '{0}'".format(type(val))) def __repr__(self): """ Without the L suffix """ value = super(newint, self).__repr__() assert value[-1] == 'L' return value[:-1] def __add__(self, other): value = super(newint, self).__add__(other) if value is NotImplemented: return long(self) + other return newint(value) def __radd__(self, other): value = super(newint, self).__radd__(other) if value is NotImplemented: return other + long(self) return newint(value) def __sub__(self, other): value = super(newint, self).__sub__(other) if value is NotImplemented: return long(self) - other return newint(value) def __rsub__(self, other): value = super(newint, self).__rsub__(other) if value is NotImplemented: return other - long(self) return newint(value) def __mul__(self, other): value = super(newint, self).__mul__(other) if isint(value): return newint(value) elif value is NotImplemented: return long(self) * other return value def __rmul__(self, other): value = super(newint, self).__rmul__(other) if isint(value): return newint(value) elif value is NotImplemented: 
return other * long(self) return value def __div__(self, other): # We override this rather than e.g. relying on object.__div__ or # long.__div__ because we want to wrap the value in a newint() # call if other is another int value = long(self) / other if isinstance(other, (int, long)): return newint(value) else: return value def __rdiv__(self, other): value = other / long(self) if isinstance(other, (int, long)): return newint(value) else: return value def __idiv__(self, other): # long has no __idiv__ method. Use __itruediv__ and cast back to # newint: value = self.__itruediv__(other) if isinstance(other, (int, long)): return newint(value) else: return value def __truediv__(self, other): value = super(newint, self).__truediv__(other) if value is NotImplemented: value = long(self) / other return value def __rtruediv__(self, other): return super(newint, self).__rtruediv__(other) def __itruediv__(self, other): # long has no __itruediv__ method mylong = long(self) mylong /= other return mylong def __floordiv__(self, other): return newint(super(newint, self).__floordiv__(other)) def __rfloordiv__(self, other): return newint(super(newint, self).__rfloordiv__(other)) def __ifloordiv__(self, other): # long has no __ifloordiv__ method mylong = long(self) mylong //= other return newint(mylong) def __mod__(self, other): value = super(newint, self).__mod__(other) if value is NotImplemented: return long(self) % other return newint(value) def __rmod__(self, other): value = super(newint, self).__rmod__(other) if value is NotImplemented: return other % long(self) return newint(value) def __divmod__(self, other): value = super(newint, self).__divmod__(other) if value is NotImplemented: mylong = long(self) return (mylong // other, mylong % other) return (newint(value[0]), newint(value[1])) def __rdivmod__(self, other): value = super(newint, self).__rdivmod__(other) if value is NotImplemented: mylong = long(self) return (other // mylong, other % mylong) return (newint(value[0]), 
newint(value[1])) def __pow__(self, other): value = super(newint, self).__pow__(other) if value is NotImplemented: return long(self) ** other return newint(value) def __rpow__(self, other): value = super(newint, self).__rpow__(other) if value is NotImplemented: return other ** long(self) return newint(value) def __lshift__(self, other): if not isint(other): raise TypeError( "unsupported operand type(s) for <<: '%s' and '%s'" % (type(self).__name__, type(other).__name__)) return newint(super(newint, self).__lshift__(other)) def __rshift__(self, other): if not isint(other): raise TypeError( "unsupported operand type(s) for >>: '%s' and '%s'" % (type(self).__name__, type(other).__name__)) return newint(super(newint, self).__rshift__(other)) def __and__(self, other): if not isint(other): raise TypeError( "unsupported operand type(s) for &: '%s' and '%s'" % (type(self).__name__, type(other).__name__)) return newint(super(newint, self).__and__(other)) def __or__(self, other): if not isint(other): raise TypeError( "unsupported operand type(s) for |: '%s' and '%s'" % (type(self).__name__, type(other).__name__)) return newint(super(newint, self).__or__(other)) def __xor__(self, other): if not isint(other): raise TypeError( "unsupported operand type(s) for ^: '%s' and '%s'" % (type(self).__name__, type(other).__name__)) return newint(super(newint, self).__xor__(other)) def __neg__(self): return newint(super(newint, self).__neg__()) def __pos__(self): return newint(super(newint, self).__pos__()) def __abs__(self): return newint(super(newint, self).__abs__()) def __invert__(self): return newint(super(newint, self).__invert__()) def __int__(self): return self def __nonzero__(self): return self.__bool__() def __bool__(self): """ So subclasses can override this, Py3-style """ return super(newint, self).__nonzero__() def __native__(self): return long(self) def to_bytes(self, length, byteorder='big', signed=False): """ Return an array of bytes representing an integer. 
The integer is represented using length bytes. An OverflowError is raised if the integer is not representable with the given number of bytes. The byteorder argument determines the byte order used to represent the integer. If byteorder is 'big', the most significant byte is at the beginning of the byte array. If byteorder is 'little', the most significant byte is at the end of the byte array. To request the native byte order of the host system, use `sys.byteorder' as the byte order value. The signed keyword-only argument determines whether two's complement is used to represent the integer. If signed is False and a negative integer is given, an OverflowError is raised. """ if length < 0: raise ValueError("length argument must be non-negative") if length == 0 and self == 0: return newbytes() if signed and self < 0: bits = length * 8 num = (2**bits) + self if num <= 0: raise OverflowError("int too smal to convert") else: if self < 0: raise OverflowError("can't convert negative int to unsigned") num = self if byteorder not in ('little', 'big'): raise ValueError("byteorder must be either 'little' or 'big'") h = b'%x' % num s = newbytes((b'0'*(len(h) % 2) + h).zfill(length*2).decode('hex')) if signed: high_set = s[0] & 0x80 if self > 0 and high_set: raise OverflowError("int too big to convert") if self < 0 and not high_set: raise OverflowError("int too small to convert") if len(s) > length: raise OverflowError("int too big to convert") return s if byteorder == 'big' else s[::-1] @classmethod def from_bytes(cls, mybytes, byteorder='big', signed=False): """ Return the integer represented by the given array of bytes. The mybytes argument must either support the buffer protocol or be an iterable object producing bytes. Bytes and bytearray are examples of built-in objects that support the buffer protocol. The byteorder argument determines the byte order used to represent the integer. If byteorder is 'big', the most significant byte is at the beginning of the byte array. 
If byteorder is 'little', the most significant byte is at the end of the byte array. To request the native byte order of the host system, use `sys.byteorder' as the byte order value. The signed keyword-only argument indicates whether two's complement is used to represent the integer. """ if byteorder not in ('little', 'big'): raise ValueError("byteorder must be either 'little' or 'big'") if isinstance(mybytes, unicode): raise TypeError("cannot convert unicode objects to bytes") # mybytes can also be passed as a sequence of integers on Py3. # Test for this: elif isinstance(mybytes, collections.Iterable): mybytes = newbytes(mybytes) b = mybytes if byteorder == 'big' else mybytes[::-1] if len(b) == 0: b = b'\x00' # The encode() method has been disabled by newbytes, but Py2's # str has it: num = int(native(b).encode('hex'), 16) if signed and (b[0] & 0x80): num = num - (2 ** (len(b)*8)) return cls(num) # def _twos_comp(val, bits): # """compute the 2's compliment of int value val""" # if( (val&(1<<(bits-1))) != 0 ): # val = val - (1<<bits) # return val __all__ = ['newint']
gpl-2.0
deepakantony/sms-tools
workspace_2014/A1/A1Part2.py
1
1706
import sys import os sys.path.append('../../software/models/') from utilFunctions import wavread import scipy.io.wavfile import numpy as np """ A1-Part-2: Basic operations with audio Write a function that reads an audio file and returns the minimum and the maximum values of the audio samples in that file. The input to the function is the wav file name (including the path) and the output should be two floating point values. If you run your code using oboe-A4.wav as the input, the function should return the following output: (-0.83486432, 0.56501967) """ INT16_FAC = (2**15)-1 INT32_FAC = (2**31)-1 INT64_FAC = (2**63)-1 norm_fact = {'int16':INT16_FAC, 'int32':INT32_FAC, 'int64':INT64_FAC,'float32':1.0,'float64':1.0} def minMaxAudio(inputFile): """ Input: inputFile: file name of the wav file (including path) Output: A tuple of the minimum and the maximum value of the audio samples, like: (min_val, max_val) """ sampleRate, audioData = scipy.io.wavfile.read(inputFile) if (len(audioData.shape) !=1): # raise error if more than one channel raise ValueError("Audio file should be mono") if (sampleRate !=44100): # raise error if more than one channel raise ValueError("Sampling rate of input sound should be 44100") if (len(audioData) < 50010): raise ValueError("Input sound should atleast have 50010 samples in it") #scale down and convert audio into floating point numbber in range of -1 to 1 audioData = np.float32(audioData)/norm_fact[audioData.dtype.name] return min(audioData), max(audioData)
agpl-3.0
Perferom/android_external_chromium_org
tools/telemetry/telemetry/page/actions/click_element.py
24
1955
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import re from telemetry.core import util from telemetry.core import exceptions from telemetry.page.actions import page_action def _EscapeSelector(selector): return selector.replace('\'', '\\\'') class ClickElementAction(page_action.PageAction): def __init__(self, attributes=None): super(ClickElementAction, self).__init__(attributes) def RunAction(self, page, tab, previous_action): def DoClick(): if hasattr(self, 'selector'): code = ('document.querySelector(\'' + _EscapeSelector(self.selector) + '\').click();') try: tab.ExecuteJavaScript(code) except exceptions.EvaluateException: raise page_action.PageActionFailed( 'Cannot find element with selector ' + self.selector) elif hasattr(self, 'text'): callback_code = 'function(element) { element.click(); }' try: util.FindElementAndPerformAction(tab, self.text, callback_code) except exceptions.EvaluateException: raise page_action.PageActionFailed( 'Cannot find element with text ' + self.text) elif hasattr(self, 'xpath'): code = ('document.evaluate("%s",' 'document,' 'null,' 'XPathResult.FIRST_ORDERED_NODE_TYPE,' 'null)' '.singleNodeValue.click()' % re.escape(self.xpath)) try: tab.ExecuteJavaScript(code) except exceptions.EvaluateException: raise page_action.PageActionFailed( 'Cannot find element with xpath ' + self.xpath) else: raise page_action.PageActionFailed( 'No condition given to click_element') DoClick()
bsd-3-clause
google/gazoo-device
gazoo_device/base_classes/nrf_connect_sdk_device.py
1
3562
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base class module for nRF Connect SDK platform device.""" import os from typing import Dict, Tuple from gazoo_device import custom_types from gazoo_device import decorators from gazoo_device import errors from gazoo_device import gdm_logger from gazoo_device.base_classes import auxiliary_device from gazoo_device.capabilities import flash_build_jlink from gazoo_device.switchboard import switchboard from gazoo_device.utility import usb_utils logger = gdm_logger.get_logger() BAUDRATE = 115200 _NRF_JLINK_NAME = "NRF52840_XXAA" class NRFConnectSDKDevice(auxiliary_device.AuxiliaryDevice): """Base class for nRF Connect SDK devices. nRF Connect SDK devices from Nordic which runs Zephyr RTOS. """ COMMUNICATION_TYPE = "PigweedSerialComms" _COMMUNICATION_KWARGS = {"protobufs": None, "baudrate": BAUDRATE} @decorators.LogDecorator(logger) def get_detection_info(self) -> Tuple[Dict[str, str], Dict[str, str]]: """Gets the persistent and optional attributes of a device during setup. Returns: Dictionary of persistent attributes and dictionary of optional attributes. 
""" persistent_dict = self.props["persistent_identifiers"] address = persistent_dict["console_port_name"] persistent_dict["serial_number"] = ( usb_utils.get_serial_number_from_path(address)) persistent_dict["model"] = "PROTO" return persistent_dict, {} @classmethod def is_connected(cls, device_config: custom_types.ManagerDeviceConfigDict) -> bool: """Returns True if the device is connected to the host.""" return os.path.exists(device_config["persistent"]["console_port_name"]) @decorators.PersistentProperty def os(self) -> str: return "Zephyr RTOS" @decorators.PersistentProperty def platform(self) -> str: return "nRF Connect" @decorators.CapabilityDecorator(switchboard.SwitchboardDefault) def switchboard(self): """Instance for communicating with the device.""" if self._COMMUNICATION_KWARGS.get("protobufs") is None: raise errors.DeviceError( "Calling switchboard from a non Pigweed device {}".format(self.name)) name = self._get_private_capability_name(switchboard.SwitchboardDefault) if not hasattr(self, name): kwargs = self._COMMUNICATION_KWARGS.copy() kwargs.update({ "communication_address": self.communication_address, "communication_type": self.COMMUNICATION_TYPE, "log_path": self.log_file_name, "device_name": self.name, "event_parser": None}) setattr(self, name, self.manager_weakref().create_switchboard(**kwargs)) return getattr(self, name) @decorators.CapabilityDecorator(flash_build_jlink.FlashBuildJLink) def flash_build(self): return self.lazy_init(flash_build_jlink.FlashBuildJLink, device_name=self.name, serial_number=self.serial_number, platform_name=_NRF_JLINK_NAME)
apache-2.0
chauhanhardik/populo_2
lms/djangoapps/certificates/views/xqueue.py
51
9592
""" Views used by XQueue certificate generation. """ import json import logging from django.contrib.auth.models import User from django.http import HttpResponse, Http404, HttpResponseForbidden from django.views.decorators.csrf import csrf_exempt from django.views.decorators.http import require_POST import dogstats_wrapper as dog_stats_api from capa.xqueue_interface import XQUEUE_METRIC_NAME from xmodule.modulestore.django import modulestore from opaque_keys.edx.locations import SlashSeparatedCourseKey from util.json_request import JsonResponse, JsonResponseBadRequest from util.bad_request_rate_limiter import BadRequestRateLimiter from certificates.api import generate_user_certificates from certificates.models import ( certificate_status_for_student, CertificateStatuses, GeneratedCertificate, ExampleCertificate, ) log = logging.getLogger(__name__) @csrf_exempt def request_certificate(request): """Request the on-demand creation of a certificate for some user, course. A request doesn't imply a guarantee that such a creation will take place. We intentionally use the same machinery as is used for doing certification at the end of a course run, so that we can be sure users get graded and then if and only if they pass, do they get a certificate issued. 
""" if request.method == "POST": if request.user.is_authenticated(): username = request.user.username student = User.objects.get(username=username) course_key = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get('course_id')) course = modulestore().get_course(course_key, depth=2) status = certificate_status_for_student(student, course_key)['status'] if status in [CertificateStatuses.unavailable, CertificateStatuses.notpassing, CertificateStatuses.error]: log_msg = u'Grading and certification requested for user %s in course %s via /request_certificate call' log.info(log_msg, username, course_key) status = generate_user_certificates(student, course_key, course=course) return HttpResponse(json.dumps({'add_status': status}), mimetype='application/json') return HttpResponse(json.dumps({'add_status': 'ERRORANONYMOUSUSER'}), mimetype='application/json') @csrf_exempt def update_certificate(request): """ Will update GeneratedCertificate for a new certificate or modify an existing certificate entry. 
See models.py for a state diagram of certificate states This view should only ever be accessed by the xqueue server """ status = CertificateStatuses if request.method == "POST": xqueue_body = json.loads(request.POST.get('xqueue_body')) xqueue_header = json.loads(request.POST.get('xqueue_header')) try: course_key = SlashSeparatedCourseKey.from_deprecated_string(xqueue_body['course_id']) cert = GeneratedCertificate.objects.get( user__username=xqueue_body['username'], course_id=course_key, key=xqueue_header['lms_key']) except GeneratedCertificate.DoesNotExist: log.critical( 'Unable to lookup certificate\n' 'xqueue_body: %s\n' 'xqueue_header: %s', xqueue_body, xqueue_header ) return HttpResponse(json.dumps({ 'return_code': 1, 'content': 'unable to lookup key' }), mimetype='application/json') if 'error' in xqueue_body: cert.status = status.error if 'error_reason' in xqueue_body: # Hopefully we will record a meaningful error # here if something bad happened during the # certificate generation process # # example: # (aamorm BerkeleyX/CS169.1x/2012_Fall) # <class 'simples3.bucket.S3Error'>: # HTTP error (reason=error(32, 'Broken pipe'), filename=None) : # certificate_agent.py:175 cert.error_reason = xqueue_body['error_reason'] else: if cert.status in [status.generating, status.regenerating]: cert.download_uuid = xqueue_body['download_uuid'] cert.verify_uuid = xqueue_body['verify_uuid'] cert.download_url = xqueue_body['url'] cert.status = status.downloadable elif cert.status in [status.deleting]: cert.status = status.deleted else: log.critical( 'Invalid state for cert update: %s', cert.status ) return HttpResponse( json.dumps({ 'return_code': 1, 'content': 'invalid cert status' }), mimetype='application/json' ) dog_stats_api.increment(XQUEUE_METRIC_NAME, tags=[ u'action:update_certificate', u'course_id:{}'.format(cert.course_id) ]) cert.save() return HttpResponse(json.dumps({'return_code': 0}), mimetype='application/json') @csrf_exempt @require_POST def 
update_example_certificate(request): """Callback from the XQueue that updates example certificates. Example certificates are used to verify that certificate generation is configured correctly for a course. Unlike other certificates, example certificates are not associated with a particular user or displayed to students. For this reason, we need a different end-point to update the status of generated example certificates. Arguments: request (HttpRequest) Returns: HttpResponse (200): Status was updated successfully. HttpResponse (400): Invalid parameters. HttpResponse (403): Rate limit exceeded for bad requests. HttpResponse (404): Invalid certificate identifier or access key. """ log.info(u"Received response for example certificate from XQueue.") rate_limiter = BadRequestRateLimiter() # Check the parameters and rate limits # If these are invalid, return an error response. if rate_limiter.is_rate_limit_exceeded(request): log.info(u"Bad request rate limit exceeded for update example certificate end-point.") return HttpResponseForbidden("Rate limit exceeded") if 'xqueue_body' not in request.POST: log.info(u"Missing parameter 'xqueue_body' for update example certificate end-point") rate_limiter.tick_bad_request_counter(request) return JsonResponseBadRequest("Parameter 'xqueue_body' is required.") if 'xqueue_header' not in request.POST: log.info(u"Missing parameter 'xqueue_header' for update example certificate end-point") rate_limiter.tick_bad_request_counter(request) return JsonResponseBadRequest("Parameter 'xqueue_header' is required.") try: xqueue_body = json.loads(request.POST['xqueue_body']) xqueue_header = json.loads(request.POST['xqueue_header']) except (ValueError, TypeError): log.info(u"Could not decode params to example certificate end-point as JSON.") rate_limiter.tick_bad_request_counter(request) return JsonResponseBadRequest("Parameters must be JSON-serialized.") # Attempt to retrieve the example certificate record # so we can update the status. 
try: uuid = xqueue_body.get('username') access_key = xqueue_header.get('lms_key') cert = ExampleCertificate.objects.get(uuid=uuid, access_key=access_key) except ExampleCertificate.DoesNotExist: # If we are unable to retrieve the record, it means the uuid or access key # were not valid. This most likely means that the request is NOT coming # from the XQueue. Return a 404 and increase the bad request counter # to protect against a DDOS attack. log.info(u"Could not find example certificate with uuid '%s' and access key '%s'", uuid, access_key) rate_limiter.tick_bad_request_counter(request) raise Http404 if 'error' in xqueue_body: # If an error occurs, save the error message so we can fix the issue. error_reason = xqueue_body.get('error_reason') cert.update_status(ExampleCertificate.STATUS_ERROR, error_reason=error_reason) log.warning( ( u"Error occurred during example certificate generation for uuid '%s'. " u"The error response was '%s'." ), uuid, error_reason ) else: # If the certificate generated successfully, save the download URL # so we can display the example certificate. download_url = xqueue_body.get('url') if download_url is None: rate_limiter.tick_bad_request_counter(request) log.warning(u"No download URL provided for example certificate with uuid '%s'.", uuid) return JsonResponseBadRequest( "Parameter 'download_url' is required for successfully generated certificates." ) else: cert.update_status(ExampleCertificate.STATUS_SUCCESS, download_url=download_url) log.info("Successfully updated example certificate with uuid '%s'.", uuid) # Let the XQueue know that we handled the response return JsonResponse({'return_code': 0})
agpl-3.0
Sendoushi/servo
python/tidy/servo_tidy_tests/test_tidy.py
1
4818
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT # file at the top-level directory of this distribution. # # Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or # http://www.apache.org/licenses/LICENSE-2.0> or the MIT license # <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your # option. This file may not be copied, modified, or distributed # except according to those terms. import os import unittest from servo_tidy import tidy base_path = 'servo_tidy_tests/' if os.path.exists('servo_tidy_tests/') else 'python/tidy/servo_tidy_tests/' def iterFile(name): return iter([os.path.join(base_path, name)]) class CheckTidiness(unittest.TestCase): def assertNoMoreErrors(self, errors): with self.assertRaises(StopIteration): errors.next() def test_spaces_correctnes(self): errors = tidy.collect_errors_for_files(iterFile('wrong_space.rs'), [], [tidy.check_by_line]) self.assertEqual('trailing whitespace', errors.next()[2]) self.assertEqual('no newline at EOF', errors.next()[2]) self.assertEqual('tab on line', errors.next()[2]) self.assertEqual('CR on line', errors.next()[2]) self.assertEqual('no newline at EOF', errors.next()[2]) self.assertNoMoreErrors(errors) def test_long_line(self): errors = tidy.collect_errors_for_files(iterFile('long_line.rs'), [], [tidy.check_by_line]) self.assertEqual('Line is longer than 120 characters', errors.next()[2]) self.assertNoMoreErrors(errors) def test_whatwg_link(self): errors = tidy.collect_errors_for_files(iterFile('whatwg_link.rs'), [], [tidy.check_by_line]) self.assertTrue('link to WHATWG may break in the future, use this format instead:' in errors.next()[2]) self.assertTrue('links to WHATWG single-page url, change to multi page:' in errors.next()[2]) self.assertNoMoreErrors(errors) def test_licence(self): errors = tidy.collect_errors_for_files(iterFile('incorrect_license.rs'), [], [tidy.check_license]) self.assertEqual('incorrect license', errors.next()[2]) self.assertNoMoreErrors(errors) def 
test_rust(self): errors = tidy.collect_errors_for_files(iterFile('rust_tidy.rs'), [], [tidy.check_rust]) self.assertEqual('use statement spans multiple lines', errors.next()[2]) self.assertEqual('missing space before }', errors.next()[2]) self.assertTrue('use statement is not in alphabetical order' in errors.next()[2]) self.assertEqual('encountered whitespace following a use statement', errors.next()[2]) self.assertTrue('mod declaration is not in alphabetical order' in errors.next()[2]) self.assertEqual('mod declaration spans multiple lines', errors.next()[2]) self.assertTrue('extern crate declaration is not in alphabetical order' in errors.next()[2]) self.assertEqual('missing space before ->', errors.next()[2]) self.assertEqual('missing space after ->', errors.next()[2]) self.assertEqual('missing space after :', errors.next()[2]) self.assertEqual('missing space before {', errors.next()[2]) self.assertEqual('missing space before =', errors.next()[2]) self.assertEqual('missing space after =', errors.next()[2]) self.assertEqual('missing space before -', errors.next()[2]) self.assertEqual('missing space before *', errors.next()[2]) self.assertEqual('missing space after =>', errors.next()[2]) self.assertEqual('extra space before :', errors.next()[2]) self.assertEqual('extra space before :', errors.next()[2]) self.assertEqual('use &[T] instead of &Vec<T>', errors.next()[2]) self.assertEqual('use &str instead of &String', errors.next()[2]) self.assertNoMoreErrors(errors) def test_spec_link(self): tidy.spec_base_path = base_path errors = tidy.collect_errors_for_files(iterFile('speclink.rs'), [], [tidy.check_spec]) self.assertEqual('method declared in webidl is missing a comment with a specification link', errors.next()[2]) self.assertNoMoreErrors(errors) def test_webidl(self): errors = tidy.collect_errors_for_files(iterFile('spec.webidl'), [tidy.check_webidl_spec], []) self.assertEqual('No specification link found.', errors.next()[2]) self.assertNoMoreErrors(errors) def 
test_toml(self): errors = tidy.collect_errors_for_files(iterFile('test.toml'), [tidy.check_toml], []) self.assertEqual('found asterisk instead of minimum version number', errors.next()[2]) self.assertNoMoreErrors(errors) def do_tests(): suite = unittest.TestLoader().loadTestsFromTestCase(CheckTidiness) unittest.TextTestRunner(verbosity=2).run(suite)
mpl-2.0
isohybrid/dotfile
vim/bundle/git:--github.com-klen-python-mode/pylibs/logilab/common/hg.py
24
4819
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr # # This file is part of logilab-common. # # logilab-common is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the Free # Software Foundation, either version 2.1 of the License, or (at your option) any # later version. # # logilab-common is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License along # with logilab-common. If not, see <http://www.gnu.org/licenses/>. """mercurial utilities (mercurial should be installed)""" __docformat__ = "restructuredtext en" import os import sys import os.path as osp try: from mercurial.error import RepoError from mercurial.__version__ import version as hg_version except ImportError: from mercurial.repo import RepoError from mercurial.version import get_version hg_version = get_version() from mercurial.hg import repository as Repository from mercurial.ui import ui as Ui from mercurial.node import short try: # mercurial >= 1.2 (?) from mercurial.cmdutil import walkchangerevs except ImportError, ex: from mercurial.commands import walkchangerevs try: # mercurial >= 1.1 (.1?) 
from mercurial.util import cachefunc except ImportError, ex: def cachefunc(func): return func try: # mercurial >= 1.3.1 from mercurial import encoding _encoding = encoding.encoding except ImportError: try: from mercurial.util import _encoding except ImportError: import locale # stay compatible with mercurial 0.9.1 (etch debian release) # (borrowed from mercurial.util 1.1.2) try: _encoding = os.environ.get("HGENCODING") if sys.platform == 'darwin' and not _encoding: # On darwin, getpreferredencoding ignores the locale environment and # always returns mac-roman. We override this if the environment is # not C (has been customized by the user). locale.setlocale(locale.LC_CTYPE, '') _encoding = locale.getlocale()[1] if not _encoding: _encoding = locale.getpreferredencoding() or 'ascii' except locale.Error: _encoding = 'ascii' try: # demandimport causes problems when activated, ensure it isn't # XXX put this in apycot where the pb has been noticed? from mercurial import demandimport demandimport.disable() except: pass Ui.warn = lambda *args, **kwargs: 0 # make it quiet def find_repository(path): """returns <path>'s mercurial repository None if <path> is not under hg control """ path = osp.realpath(osp.abspath(path)) while not osp.isdir(osp.join(path, ".hg")): oldpath = path path = osp.dirname(path) if path == oldpath: return None return path def get_repository(path): """Simple function that open a hg repository""" repopath = find_repository(path) if repopath is None: raise RuntimeError('no repository found in %s' % osp.abspath(path)) return Repository(Ui(), path=repopath) def incoming(wdrepo, masterrepo): try: return wdrepo.findincoming(masterrepo) except AttributeError: from mercurial import hg, discovery revs, checkout = hg.addbranchrevs(wdrepo, masterrepo, ('', []), None) common, incoming, rheads = discovery.findcommonincoming( wdrepo, masterrepo, heads=revs) if not masterrepo.local(): from mercurial import bundlerepo, changegroup if revs is None and 
masterrepo.capable('changegroupsubset'): revs = rheads if revs is None: cg = masterrepo.changegroup(incoming, "incoming") else: cg = masterrepo.changegroupsubset(incoming, revs, 'incoming') fname = changegroup.writebundle(cg, None, "HG10UN") # use the created uncompressed bundlerepo masterrepo = bundlerepo.bundlerepository(wdrepo.ui, wdrepo.root, fname) return masterrepo.changelog.nodesbetween(incoming, revs)[0] def outgoing(wdrepo, masterrepo): try: return wdrepo.findoutgoing(masterrepo) except AttributeError: from mercurial import hg, discovery revs, checkout = hg.addbranchrevs(wdrepo, wdrepo, ('', []), None) o = discovery.findoutgoing(wdrepo, masterrepo) return wdrepo.changelog.nodesbetween(o, revs)[0]
bsd-2-clause
joebowen/LogMyRocket_API
LogMyRocket/libraries/sys_packages/requests/packages/charade/euckrfreq.py
3121
45978
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # Sampling from about 20M text materials include literature and computer technology # 128 --> 0.79 # 256 --> 0.92 # 512 --> 0.986 # 1024 --> 0.99944 # 2048 --> 0.99999 # # Idea Distribution Ratio = 0.98653 / (1-0.98653) = 73.24 # Random Distribution Ration = 512 / (2350-512) = 0.279. 
# # Typical Distribution Ratio EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0 EUCKR_TABLE_SIZE = 2352 # Char to FreqOrder table , EUCKRCharToFreqOrder = ( \ 13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87, 1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398, 1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734, 945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739, 116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622, 708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750, 1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856, 344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205, 709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779, 1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19, 1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567, 1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797, 1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802, 1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899, 885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818, 1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409, 1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697, 1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770, 1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723, 544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416, 1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300, 119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083, 893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857, 
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871, 282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420, 1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885, 127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889, 0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893, 1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317, 1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841, 1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910, 1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610, 269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375, 1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939, 887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870, 217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934, 1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888, 1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950, 1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065, 1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002, 1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965, 1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467, 50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285, 639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7, 103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979, 1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985, 818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994, 1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250, 423, 
277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824, 532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003, 2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745, 619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61, 191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023, 2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032, 2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912, 2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224, 719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012, 819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050, 2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681, 499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414, 1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068, 2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075, 1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850, 2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606, 2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449, 1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452, 949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112, 2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121, 2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130, 22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274, 962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139, 2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721, 1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298, 
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463, 2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747, 2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285, 2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187, 2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10, 2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350, 1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201, 2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972, 2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219, 2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233, 2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242, 2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247, 1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178, 1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255, 2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259, 1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262, 2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702, 1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273, 295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541, 2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117, 432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187, 2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800, 808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312, 2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229, 2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315, 501, 
380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484, 2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170, 1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335, 425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601, 1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395, 2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354, 1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476, 2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035, 416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498, 2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310, 1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389, 2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504, 1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505, 2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145, 1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624, 593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700, 2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221, 2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377, 644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448, 915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485, 1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705, 1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465, 291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471, 2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997, 2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 
684,2485,2486, 797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494, 434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771, 585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323, 2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491, 95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510, 161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519, 2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532, 2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199, 704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544, 2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247, 1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441, 249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562, 2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362, 2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583, 2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465, 3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431, 202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151, 974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596, 2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406, 2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611, 2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619, 1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628, 2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042, 670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256 #Everything below is of no interest for detection 
purpose 2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658, 2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674, 2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690, 2691,2692,2693,2694,2695,2696,2697,2698,2699,1542, 880,2700,2701,2702,2703,2704, 2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720, 2721,2722,2723,2724,2725,1543,2726,2727,2728,2729,2730,2731,2732,1544,2733,2734, 2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750, 2751,2752,2753,2754,1545,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765, 2766,1546,2767,1547,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779, 2780,2781,2782,2783,2784,2785,2786,1548,2787,2788,2789,1109,2790,2791,2792,2793, 2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809, 2810,2811,2812,1329,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824, 2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840, 2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856, 1549,2857,2858,2859,2860,1550,2861,2862,1551,2863,2864,2865,2866,2867,2868,2869, 2870,2871,2872,2873,2874,1110,1330,2875,2876,2877,2878,2879,2880,2881,2882,2883, 2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899, 2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915, 2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,1331, 2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,1552,2944,2945, 2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961, 2962,2963,2964,1252,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976, 2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992, 2993,2994,2995,2996,2997,2998,2999,3000,3001,3002,3003,3004,3005,3006,3007,3008, 
3009,3010,3011,3012,1553,3013,3014,3015,3016,3017,1554,3018,1332,3019,3020,3021, 3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037, 3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,1555,3051,3052, 3053,1556,1557,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066, 3067,1558,3068,3069,3070,3071,3072,3073,3074,3075,3076,1559,3077,3078,3079,3080, 3081,3082,3083,1253,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095, 3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,1152,3109,3110, 3111,3112,3113,1560,3114,3115,3116,3117,1111,3118,3119,3120,3121,3122,3123,3124, 3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140, 3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156, 3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172, 3173,3174,3175,3176,1333,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187, 3188,3189,1561,3190,3191,1334,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201, 3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217, 3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233, 3234,1562,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248, 3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264, 3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,1563,3278,3279, 3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295, 3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311, 3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327, 3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343, 3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359, 3360,3361,3362,3363,3364,1335,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374, 
3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,1336,3388,3389, 3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3401,3402,3403,3404,3405, 3406,3407,3408,3409,3410,3411,3412,3413,3414,1337,3415,3416,3417,3418,3419,1338, 3420,3421,3422,1564,1565,3423,3424,3425,3426,3427,3428,3429,3430,3431,1254,3432, 3433,3434,1339,3435,3436,3437,3438,3439,1566,3440,3441,3442,3443,3444,3445,3446, 3447,3448,3449,3450,3451,3452,3453,3454,1255,3455,3456,3457,3458,3459,1567,1191, 3460,1568,1569,3461,3462,3463,1570,3464,3465,3466,3467,3468,1571,3469,3470,3471, 3472,3473,1572,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486, 1340,3487,3488,3489,3490,3491,3492,1021,3493,3494,3495,3496,3497,3498,1573,3499, 1341,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,1342,3512,3513, 3514,3515,3516,1574,1343,3517,3518,3519,1575,3520,1576,3521,3522,3523,3524,3525, 3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541, 3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557, 3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573, 3574,3575,3576,3577,3578,3579,3580,1577,3581,3582,1578,3583,3584,3585,3586,3587, 3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603, 3604,1579,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618, 3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,1580,3630,3631,1581,3632, 3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648, 3649,3650,3651,3652,3653,3654,3655,3656,1582,3657,3658,3659,3660,3661,3662,3663, 3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679, 3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695, 3696,3697,3698,3699,3700,1192,3701,3702,3703,3704,1256,3705,3706,3707,3708,1583, 1257,3709,3710,3711,3712,3713,3714,3715,3716,1584,3717,3718,3719,3720,3721,3722, 
3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738, 3739,3740,3741,3742,3743,3744,3745,1344,3746,3747,3748,3749,3750,3751,3752,3753, 3754,3755,3756,1585,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,1586,3767, 3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,1345,3779,3780,3781,3782, 3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,1346,1587,3796, 3797,1588,3798,3799,3800,3801,3802,3803,3804,3805,3806,1347,3807,3808,3809,3810, 3811,1589,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,1590,3822,3823,1591, 1348,3824,3825,3826,3827,3828,3829,3830,1592,3831,3832,1593,3833,3834,3835,3836, 3837,3838,3839,3840,3841,3842,3843,3844,1349,3845,3846,3847,3848,3849,3850,3851, 3852,3853,3854,3855,3856,3857,3858,1594,3859,3860,3861,3862,3863,3864,3865,3866, 3867,3868,3869,1595,3870,3871,3872,3873,1596,3874,3875,3876,3877,3878,3879,3880, 3881,3882,3883,3884,3885,3886,1597,3887,3888,3889,3890,3891,3892,3893,3894,3895, 1598,3896,3897,3898,1599,1600,3899,1350,3900,1351,3901,3902,1352,3903,3904,3905, 3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921, 3922,3923,3924,1258,3925,3926,3927,3928,3929,3930,3931,1193,3932,1601,3933,3934, 3935,3936,3937,3938,3939,3940,3941,3942,3943,1602,3944,3945,3946,3947,3948,1603, 3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964, 3965,1604,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,1353,3978, 3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,1354,3992,3993, 3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009, 4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,1355,4024, 4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040, 1605,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055, 4056,4057,4058,4059,4060,1606,4061,4062,4063,4064,1607,4065,4066,4067,4068,4069, 
4070,4071,4072,4073,4074,4075,4076,1194,4077,4078,1608,4079,4080,4081,4082,4083, 4084,4085,4086,4087,1609,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098, 4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,1259,4109,4110,4111,4112,4113, 4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,1195,4125,4126,4127,1610, 4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,1356,4138,4139,4140,4141,4142, 4143,4144,1611,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157, 4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173, 4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189, 4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4201,4202,4203,4204,4205, 4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,1612,4220, 4221,4222,4223,4224,4225,4226,4227,1357,4228,1613,4229,4230,4231,4232,4233,4234, 4235,4236,4237,4238,4239,4240,4241,4242,4243,1614,4244,4245,4246,4247,4248,4249, 4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265, 4266,4267,4268,4269,4270,1196,1358,4271,4272,4273,4274,4275,4276,4277,4278,4279, 4280,4281,4282,4283,4284,4285,4286,4287,1615,4288,4289,4290,4291,4292,4293,4294, 4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310, 4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326, 4327,4328,4329,4330,4331,4332,4333,4334,1616,4335,4336,4337,4338,4339,4340,4341, 4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357, 4358,4359,4360,1617,4361,4362,4363,4364,4365,1618,4366,4367,4368,4369,4370,4371, 4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387, 4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403, 4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,1619,4417,4418, 4419,4420,4421,4422,4423,4424,4425,1112,4426,4427,4428,4429,4430,1620,4431,4432, 
4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,1260,1261,4443,4444,4445,4446, 4447,4448,4449,4450,4451,4452,4453,4454,4455,1359,4456,4457,4458,4459,4460,4461, 4462,4463,4464,4465,1621,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476, 4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,1055,4490,4491, 4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507, 4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,1622,4519,4520,4521,1623, 4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,1360,4536, 4537,4538,4539,4540,4541,4542,4543, 975,4544,4545,4546,4547,4548,4549,4550,4551, 4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567, 4568,4569,4570,4571,1624,4572,4573,4574,4575,4576,1625,4577,4578,4579,4580,4581, 4582,4583,4584,1626,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,1627, 4596,4597,4598,4599,4600,4601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611, 4612,4613,4614,4615,1628,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626, 4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642, 4643,4644,4645,4646,4647,4648,4649,1361,4650,4651,4652,4653,4654,4655,4656,4657, 4658,4659,4660,4661,1362,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672, 4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,1629,4683,4684,4685,4686,4687, 1630,4688,4689,4690,4691,1153,4692,4693,4694,1113,4695,4696,4697,4698,4699,4700, 4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,1197,4712,4713,4714,4715, 4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731, 4732,4733,4734,4735,1631,4736,1632,4737,4738,4739,4740,4741,4742,4743,4744,1633, 4745,4746,4747,4748,4749,1262,4750,4751,4752,4753,4754,1363,4755,4756,4757,4758, 4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,1634,4769,4770,4771,4772,4773, 4774,4775,4776,4777,4778,1635,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788, 
4789,1636,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803, 4804,4805,4806,1637,4807,4808,4809,1638,4810,4811,4812,4813,4814,4815,4816,4817, 4818,1639,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832, 4833,1077,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847, 4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863, 4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879, 4880,4881,4882,4883,1640,4884,4885,1641,4886,4887,4888,4889,4890,4891,4892,4893, 4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909, 4910,4911,1642,4912,4913,4914,1364,4915,4916,4917,4918,4919,4920,4921,4922,4923, 4924,4925,4926,4927,4928,4929,4930,4931,1643,4932,4933,4934,4935,4936,4937,4938, 4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954, 4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970, 4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,1644,4981,4982,4983,4984,1645, 4985,4986,1646,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999, 5000,5001,5002,5003,5004,5005,1647,5006,1648,5007,5008,5009,5010,5011,5012,1078, 5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028, 1365,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,1649,5040,5041,5042, 5043,5044,5045,1366,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,1650,5056, 5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072, 5073,5074,5075,5076,5077,1651,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087, 5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103, 5104,5105,5106,5107,5108,5109,5110,1652,5111,5112,5113,5114,5115,5116,5117,5118, 1367,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,1653,5130,5131,5132, 5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148, 
5149,1368,5150,1654,5151,1369,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161, 5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177, 5178,1370,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192, 5193,5194,5195,5196,5197,5198,1655,5199,5200,5201,5202,1656,5203,5204,5205,5206, 1371,5207,1372,5208,5209,5210,5211,1373,5212,5213,1374,5214,5215,5216,5217,5218, 5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234, 5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,1657,5248,5249, 5250,5251,1658,1263,5252,5253,5254,5255,5256,1375,5257,5258,5259,5260,5261,5262, 5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278, 5279,5280,5281,5282,5283,1659,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293, 5294,5295,5296,5297,5298,5299,5300,1660,5301,5302,5303,5304,5305,5306,5307,5308, 5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,1376,5322,5323, 5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,1198,5334,5335,5336,5337,5338, 5339,5340,5341,5342,5343,1661,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353, 5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369, 5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385, 5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,1264,5399,5400, 5401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,1662,5413,5414,5415, 5416,1663,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430, 5431,5432,5433,5434,5435,5436,5437,5438,1664,5439,5440,5441,5442,5443,5444,5445, 5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461, 5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477, 5478,1154,5479,5480,5481,5482,5483,5484,5485,1665,5486,5487,5488,5489,5490,5491, 5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507, 
5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523, 5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539, 5540,5541,5542,5543,5544,5545,5546,5547,5548,1377,5549,5550,5551,5552,5553,5554, 5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570, 1114,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585, 5586,5587,5588,5589,5590,5591,5592,1378,5593,5594,5595,5596,5597,5598,5599,5600, 5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,1379,5615, 5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631, 5632,5633,5634,1380,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646, 5647,5648,5649,1381,1056,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660, 1666,5661,5662,5663,5664,5665,5666,5667,5668,1667,5669,1668,5670,5671,5672,5673, 5674,5675,5676,5677,5678,1155,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688, 5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,1669,5699,5700,5701,5702,5703, 5704,5705,1670,5706,5707,5708,5709,5710,1671,5711,5712,5713,5714,1382,5715,5716, 5717,5718,5719,5720,5721,5722,5723,5724,5725,1672,5726,5727,1673,1674,5728,5729, 5730,5731,5732,5733,5734,5735,5736,1675,5737,5738,5739,5740,5741,5742,5743,5744, 1676,5745,5746,5747,5748,5749,5750,5751,1383,5752,5753,5754,5755,5756,5757,5758, 5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,1677,5769,5770,5771,5772,5773, 1678,5774,5775,5776, 998,5777,5778,5779,5780,5781,5782,5783,5784,5785,1384,5786, 5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,1679,5801, 5802,5803,1115,1116,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815, 5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831, 5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847, 5848,5849,5850,5851,5852,5853,5854,5855,1680,5856,5857,5858,5859,5860,5861,5862, 
5863,5864,1681,5865,5866,5867,1682,5868,5869,5870,5871,5872,5873,5874,5875,5876, 5877,5878,5879,1683,5880,1684,5881,5882,5883,5884,1685,5885,5886,5887,5888,5889, 5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905, 5906,5907,1686,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, 5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,1687, 5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951, 5952,1688,1689,5953,1199,5954,5955,5956,5957,5958,5959,5960,5961,1690,5962,5963, 5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979, 5980,5981,1385,5982,1386,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993, 5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009, 6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025, 6026,6027,1265,6028,6029,1691,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039, 6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055, 6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071, 6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,1692,6085,6086, 6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102, 6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118, 6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,1693,6132,6133, 6134,6135,6136,1694,6137,6138,6139,6140,6141,1695,6142,6143,6144,6145,6146,6147, 6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163, 6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179, 6180,6181,6182,6183,6184,6185,1696,6186,6187,6188,6189,6190,6191,6192,6193,6194, 6195,6196,6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210, 6211,6212,6213,6214,6215,6216,6217,6218,6219,1697,6220,6221,6222,6223,6224,6225, 
6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241, 6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,1698,6254,6255,6256, 6257,6258,6259,6260,6261,6262,6263,1200,6264,6265,6266,6267,6268,6269,6270,6271, #1024 6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287, 6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,1699, 6303,6304,1700,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317, 6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333, 6334,6335,6336,6337,6338,6339,1701,6340,6341,6342,6343,6344,1387,6345,6346,6347, 6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363, 6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379, 6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395, 6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411, 6412,6413,1702,6414,6415,6416,6417,6418,6419,6420,6421,6422,1703,6423,6424,6425, 6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,1704,6439,6440, 6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456, 6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472, 6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488, 6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,1266, 6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519, 6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535, 6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551, 1705,1706,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565, 6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581, 6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597, 
6598,6599,6600,6601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613, 6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629, 6630,6631,6632,6633,6634,6635,6636,6637,1388,6638,6639,6640,6641,6642,6643,6644, 1707,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659, 6660,6661,6662,6663,1708,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674, 1201,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689, 6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705, 6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721, 6722,6723,6724,6725,1389,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736, 1390,1709,6737,6738,6739,6740,6741,6742,1710,6743,6744,6745,6746,1391,6747,6748, 6749,6750,6751,6752,6753,6754,6755,6756,6757,1392,6758,6759,6760,6761,6762,6763, 6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779, 6780,1202,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794, 6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,1711, 6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825, 6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,1393,6837,6838,6839,6840, 6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856, 6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872, 6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888, 6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,1712,6903, 6904,6905,6906,6907,6908,6909,6910,1713,6911,6912,6913,6914,6915,6916,6917,6918, 6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934, 6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950, 6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966, 
6967,6968,6969,6970,6971,6972,6973,6974,1714,6975,6976,6977,6978,6979,6980,6981, 6982,6983,6984,6985,6986,6987,6988,1394,6989,6990,6991,6992,6993,6994,6995,6996, 6997,6998,6999,7000,1715,7001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011, 7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027, 7028,1716,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042, 7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058, 7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074, 7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090, 7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106, 7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122, 7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138, 7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154, 7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170, 7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186, 7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202, 7203,7204,7205,7206,7207,1395,7208,7209,7210,7211,7212,7213,1717,7214,7215,7216, 7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232, 7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248, 7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264, 7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280, 7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296, 7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312, 7313,1718,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327, 7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343, 
7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359, 7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375, 7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391, 7392,7393,7394,7395,7396,7397,7398,7399,7400,7401,7402,7403,7404,7405,7406,7407, 7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423, 7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439, 7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455, 7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471, 7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487, 7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503, 7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519, 7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535, 7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551, 7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, 7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583, 7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599, 7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615, 7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631, 7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647, 7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663, 7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679, 7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695, 7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711, 7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727, 
7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743, 7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759, 7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775, 7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791, 7792,7793,7794,7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,7806,7807, 7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823, 7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839, 7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855, 7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871, 7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887, 7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903, 7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919, 7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, 7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, 7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, 7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, 7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, 8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, 8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, 8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, 8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, 8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, 8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, 8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, 
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, 8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, 8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, 8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, 8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, 8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, 8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, 8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, 8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, 8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271, 8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287, 8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303, 8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319, 8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335, 8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351, 8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367, 8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383, 8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399, 8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415, 8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431, 8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447, 8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463, 8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479, 8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495, 
8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511, 8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527, 8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543, 8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559, 8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575, 8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591, 8592,8593,8594,8595,8596,8597,8598,8599,8600,8601,8602,8603,8604,8605,8606,8607, 8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623, 8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639, 8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655, 8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671, 8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687, 8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, 8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719, 8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735, 8736,8737,8738,8739,8740,8741) # flake8: noqa
gpl-3.0
Ghostboy-287/okadminfinder3
okadminfinder.py
1
18022
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""okadminfinder.py -- scan a website for common admin-panel paths.

Candidate paths are read from ``LinkFile/adminpanellinks.txt`` and probed one
by one; traffic can optionally be routed through Tor, a fixed HTTP proxy, or
a randomly chosen public proxy.
"""

try:
    # Change main dir to this (need for Pentest Box)
    import os
    os.path.abspath(__file__)

    from Classes import (Credits, OKadminFinderClass, MessengerClass)

    import argparse
    from colorama import Fore, Back, Style
    import random
    import requests
    import socket
    import socks
    import subprocess
    import sys
    import time
    import threading
    from tqdm import tqdm
    import urllib.request, urllib.error, urllib.parse
    from urllib.request import urlopen

    # Get Messenger class to print information
    messenger = MessengerClass.Messenger()
except Exception:
    # FIX: the original guard was `except():` -- an *empty* tuple of
    # exception types, which matches nothing, so import failures were
    # never reported this way.
    exit('\n\t[x] Session Cancelled; Something wrong with import modules')

# Get credits and print the banner
messenger.writeMessage(Credits.getCredits()[0], 'green')

# Get main class object
OKadminFinder = OKadminFinderClass.OKadminFinder()

parser = argparse.ArgumentParser(
    formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=30, width=90))
parser.add_argument("-u", "--url", default=False,
                    help="Target URL (e.g. 'www.example.com' or 'example.com')")
parser.add_argument("-t", "--tor", action='store_true', default=False,
                    help="Use Tor anonymity network")
parser.add_argument("-p", "--proxy", default=False,
                    help="Use an HTTP proxy (e.g '127.0.0.1:8080')")
parser.add_argument("-rp", "--random-proxy", action="store_true", default=False,
                    dest="random_proxy", help="Use randomly selected proxy server")
parser.add_argument("-r", "--random-agent", action='store_true', default=False,
                    dest='rand', help="Use randomly selected User-Agent")
parser.add_argument("-v", "--verbose", action='store_true', default=False,
                    help="Display more informations")
parser.add_argument("-U", "--update", action='store_true', default=False,
                    help="Update OKadminFinder")
parser.add_argument("-i", "--interactive", action='store_true', default=False,
                    help="Interactive interface" + Fore.RED + Style.BRIGHT +
                         "[other arguments not required]")

if len(sys.argv) <= 1:
    parser.print_usage()
    sys.exit(1)
else:
    args = parser.parse_args()

# site = 'testphp.vulnweb.com'
proxies = ""
headers = {'user-agent': 'OKadminFinder/%s' % Credits.getCredits()[1]}
OKadminFinder.header = headers


def url(site):
    """Scan *site* for admin panels, showing progress with tqdm.

    Uses the module-level ``proxies`` mapping; pauses for user input after
    every 10th hit so the output can be reviewed.
    """
    try:
        if OKadminFinder.checkUrl(site, proxies):
            messenger.writeMessage('\n Site %s is stable\n' % site, 'green')
            urls = tqdm(OKadminFinder.getUrls('LinkFile/adminpanellinks.txt'),
                        bar_format="{l_bar}{bar}|{n_fmt}/{total_fmt}{postfix}")
        else:
            messenger.writeMessage(' Something wrong with url', 'red')
            urls = tqdm(OKadminFinder.getUrls('LinkFile/adminpanellinks.txt'),
                        bar_format="{bar}")
            exit(SystemExit)

        # Counters for total links, and admin panels found
        totalCount = len(urls)
        adminCount = 0

        # Checking all links
        for link in urls:
            # Create test link from the site and an entry of the links file
            reqLink = OKadminFinder.createReqLink(site, link, proxies)
            # messenger.writeMessage('\t[#] Checking http://' + reqLink, 'yellow')
            urls.set_description(Fore.WHITE + Style.NORMAL + " Processing ...")
            # Test created link for HTTP errors. No error -> potential admin panel
            if OKadminFinder.checkUrl(reqLink, proxies):
                adminCount += 1
                messenger.writeMessage(
                    '\n' + Fore.CYAN + Style.BRIGHT +
                    ' {:<50}'.format('[✔] http://' + reqLink,) +
                    Fore.GREEN + Style.BRIGHT +
                    '{:>30}'.format('Admin page found!\n'), 'bright')
                # Pause for confirmation after every n-th hit.  (The original
                # wrapped this test in `for x in range(totalCount)` with an
                # immediate break -- the loop variable was never used, so it
                # reduces to this single check.)
                n = 10
                if adminCount % n == 0:
                    messenger.writeInput(
                        ' Press' + Fore.BLUE + Style.BRIGHT + ' ENTER ' +
                        Fore.WHITE + Style.NORMAL + 'to continue scanning OR' +
                        Fore.RED + Style.BRIGHT + ' CTRL+C ' +
                        Fore.WHITE + Style.NORMAL + 'to cancel \n')
            # If HTTP error, continue testing other links
            else:
                continue

        # Write last information about the scan with counters
        messenger.writeMessage('\n\n Completed \n', 'green')
        messenger.writeMessage(str(adminCount) + ' Admin pages found', 'white')
        messenger.writeMessage(str(totalCount) + ' total pages scanned', 'white')
        messenger.writeInput(' [/] Scanning over; Press Enter to Exit', 'green')
        messenger.writeMessage('', 'white')
    except (KeyboardInterrupt, SystemExit):
        messenger.writeMessage('\n\t[x] Session Cancelled', 'red')
        urls.close()
        messenger.writeMessage('', 'white')
    except Exception:
        # FIX: was `except():` -- matched nothing.
        messenger.writeMessage('\n\t[x] Session Cancelled; Unknown error', 'red')
        messenger.writeMessage('', 'white')


def random_agent():
    """Pick a random User-Agent from LinkFile/user-agent.txt and install it."""
    useragent = "LinkFile/user-agent.txt"
    with open(useragent, 'r') as fh:  # FIX: original leaked the file handle
        ua = fh.read().splitlines()
    rua = random.choice(ua)
    headers = {'user-agent': rua}
    OKadminFinder.header = headers
    return OKadminFinder.header


def random_proxy():
    """Fetch a public proxy list, pick one at random and route through it.

    Returns the ``proxies`` mapping suitable for ``requests``.
    """
    proxy_list = requests.get(
        'https://raw.githubusercontent.com/a2u/free-proxy-list/master/free-proxy-list.txt'
    ).text.splitlines()
    random_proxy = random.choice(proxy_list)
    rip = random_proxy.rsplit(':', 1)[0]  # random proxy ip
    rpp = random_proxy.rsplit(':', 1)[1]  # random proxy port
    proxies = {
        'http': random_proxy,
        'https': random_proxy,
    }
    try:
        s = socks.socksocket()
        # FIX: set_proxy expects an integer port; the original passed the
        # raw string slice.
        s.set_proxy(socks.HTTP, rip, int(rpp))
        socket.socket = socks.socksocket
        urllib.request.urlopen
    except (IndexError, IndentationError):
        messenger.writeMessage('\n\tSorry Error 😭 ', 'red')
        quit(0)
    return proxies


def tor():
    """Route all sockets through the local Tor SOCKS5 proxy (port 9050)."""
    socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, 'localhost', 9050)
    socket.socket = socks.socksocket
    urllib.request.urlopen


def proxy():
    """Install the user-supplied HTTP proxy (``args.proxy``) and verify it.

    Exits the program if the proxy string is malformed or unreachable;
    returns the ``proxies`` mapping on success.
    """
    args.proxy = str(args.proxy)
    proxies = {
        'http': args.proxy,
        'https': args.proxy,
    }
    try:
        ht = args.proxy.split(':')
        pr = int(ht[1])
        s = socks.socksocket()
        s.set_proxy(socks.HTTP, ht[0], pr)
        socket.socket = socks.socksocket
        urllib.request.urlopen
    except (IndexError, IndentationError):
        messenger.writeMessage('\n\tPlease check the format of your proxy | reminder: 127.0.0.1:8080 ', 'red')
        quit(0)
    try:
        print(Fore.BLUE + '\tChecking Http proxy...', end="\r")
        time.sleep(1)
        rp = requests.get('http://testphp.vulnweb.com', proxies=proxies, timeout=10)
        print(Fore.BLUE + '\tChecking Http proxy...',
              Fore.GREEN + Style.BRIGHT + 'OK\n' + Fore.WHITE + Style.NORMAL)
    except requests.RequestException:
        print(Fore.BLUE + '\tChecking Http proxy...',
              Fore.RED + Style.BRIGHT + 'BAD\n' + Fore.WHITE + Style.NORMAL)
        messenger.writeMessage('\n ╔═══[!] Connection Troubles', 'red')
        print(' ║')
        print(' ╚══►' + Fore.BLUE + '[Note]' + Fore.YELLOW + '╾╥──╸ Please check your connection, proxy or tor')
        print('            ╟──╸ ' + Fore.YELLOW + Style.BRIGHT + 'don\'t add' + Fore.YELLOW + Style.NORMAL + ' \'http://\' or \'https://\'')
        print('            ╙──╸ ' + Fore.YELLOW + Style.NORMAL + 'check that you have written the url correctly\n')
        quit(0)
    return proxies


def ipinf():
    """Query ifconfig.co and print the caller's public IP / geo info."""
    ip = requests.get('http://ifconfig.co/ip', proxies=proxies, headers=OKadminFinder.header).text
    cc = requests.get('http://ifconfig.co/country', proxies=proxies, headers=OKadminFinder.header).text
    iso = requests.get('http://ifconfig.co/country-iso', proxies=proxies, headers=OKadminFinder.header).text
    city = requests.get('http://ifconfig.co/city', proxies=proxies, headers=OKadminFinder.header).text
    print(''' ┆
 ├───[''' + Fore.CYAN + '''IP address Infos:''' + Fore.YELLOW + ''']
 ┆''')
    print(' ├──► ' + Fore.BLUE + 'Country: ' + cc + Fore.YELLOW +
          ' ├───► ' + Fore.BLUE + 'IP: ' + ip + Fore.YELLOW +
          ' ├────► ' + Fore.BLUE + 'Country ISO: ' + iso + Fore.YELLOW +
          ' └────► ' + Fore.BLUE + 'City: ' + city)
    print('')


def vipinf():
    """Verbose variant of ipinf(): same lookups, standalone box art."""
    ip = requests.get('http://ifconfig.co/ip', proxies=proxies, headers=OKadminFinder.header).text
    cc = requests.get('http://ifconfig.co/country', proxies=proxies, headers=OKadminFinder.header).text
    iso = requests.get('http://ifconfig.co/country-iso', proxies=proxies, headers=OKadminFinder.header).text
    city = requests.get('http://ifconfig.co/city', proxies=proxies, headers=OKadminFinder.header).text
    print(''' ┌───[''' + Fore.CYAN + '''IP address Infos:''' + Fore.YELLOW + ''']
 ┆''')
    print(' ├──► ' + Fore.BLUE + 'Country: ' + cc + Fore.YELLOW +
          ' ├───► ' + Fore.BLUE + 'IP: ' + ip + Fore.YELLOW +
          ' ├────► ' + Fore.BLUE + 'Country ISO: ' + iso + Fore.YELLOW +
          ' └─────► ' + Fore.BLUE + 'City: ' + city)
    print('')


def hos():
    """Print server / hostname headers of the target given via ``args.url``."""
    site = args.url
    rh = requests.get('http://' + site, proxies=proxies, headers=OKadminFinder.header)
    di = socket.gethostbyname(site)
    print(Fore.CYAN + Style.BRIGHT + '\tServer: ' + Fore.YELLOW + rh.headers['Server'] +
          '\t\t' + Fore.CYAN + Style.BRIGHT + 'Hostname: ' + Fore.YELLOW + di + '\n')
    try:
        xf = dict(rh.headers).get("x-frame-options")
        xf = str(xf)
        print(Fore.CYAN + Style.BRIGHT + '\tX-Powered-By: ' + Fore.YELLOW +
              rh.headers['X-Powered-By'] + '\t\t' + Fore.CYAN + Style.BRIGHT +
              'X-Frame-Options: ' + Fore.YELLOW + xf + '\n\n')
    except KeyError:
        # Target did not send an X-Powered-By header -- nothing to show.
        pass


def update():
    """Self-update by running ``git pull`` in the working tree."""
    process = subprocess.Popen(["git", "pull"], stdout=subprocess.PIPE)
    output = process.communicate()[0].decode("utf-8")
    print(output)


def interactive():
    """Guided mode: prompt for network settings and a target, then scan."""
    try:
        # Random UserAgent
        # Useragents are from: https://techblog.willshouse.com/2012/01/03/most-common-user-agents/
        try:
            print(Fore.BLUE + '\tGetting random user-agent...', end="\r")
            time.sleep(1)
            useragent = "LinkFile/user-agent.txt"
            with open(useragent, 'r') as fh:  # FIX: original leaked the handle
                ua = fh.read().splitlines()
            rua = random.choice(ua)
            headers = {'user-agent': rua}
            print(Fore.BLUE + '\tGetting random user-agent...',
                  Fore.GREEN + Style.BRIGHT + 'DONE\n' + Fore.WHITE + Style.NORMAL)
        except Exception:
            # Fall back to the default scanner User-Agent
            headers = {'user-agent': 'OKadminFinder/%s' % Credits.getCredits()[1]}
        OKadminFinder.header = headers

        # Additional params
        # if not messenger.writeInputWithYesNo(Fore.YELLOW + ' Do you want use default params?'):
        #     timeout = messenger.writeInput(Fore.YELLOW + ' Change timeout. Please write value in seconds: ' + Fore.GREEN)
        #     OKadminFinder.timeout = timeout

        # network params
        choice = ''
        print(Fore.YELLOW + ' ┌───[' + Fore.CYAN + 'Network settings:' + Fore.YELLOW + ']')
        while choice not in ['1', '2', '3', 'tor', 'proxy']:
            choice = input(Fore.YELLOW + '''
 ┊
 ├╼[1] tor
 ├╼[2] proxy
 ├╼[3] nothing
 ┊
 └───╼''' + Fore.RED + ''' Please choose one option''' + Fore.YELLOW + '''
 ~$ ''')
            if choice == '1' or choice == 'tor':
                socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, 'localhost', 9050)
                socket.socket = socks.socksocket
                urllib.request.urlopen
                proxies = ""
            elif choice == '2' or choice == 'proxy':
                prox = input('''
 ┊
 └────► set your HTTP proxy {example:127.0.0.1:80} :
 ~$ ''')
                proxies = {
                    'http': 'http://' + prox,
                    'https': 'http://' + prox,
                }
                try:
                    ht = prox.split(':')
                    pr = int(ht[1])
                    s = socks.socksocket()
                    s.set_proxy(socks.HTTP, ht[0], pr)
                    socket.socket = socks.socksocket
                    urllib.request.urlopen
                except IndexError:
                    messenger.writeMessage('\n\tPlease check the format of your proxy | reminder: 127.0.0.1:8080 ', 'red')
                    quit(0)
            else:
                # Invalid or "nothing": no proxy, loop re-checks the choice.
                proxies = ""
                continue

        # Show what the target will see about us
        ip = requests.get('http://ifconfig.co/ip', proxies=proxies, headers=OKadminFinder.header).text
        cc = requests.get('http://ifconfig.co/country', proxies=proxies, headers=OKadminFinder.header).text
        iso = requests.get('http://ifconfig.co/country-iso', proxies=proxies, headers=OKadminFinder.header).text
        city = requests.get('http://ifconfig.co/city', proxies=proxies, headers=OKadminFinder.header).text
        print(''' ┆
 ├───[''' + Fore.CYAN + '''IP address Infos:''' + Fore.YELLOW + ''']
 ┆''')
        print(' ├──► ' + Fore.BLUE + 'Country: ' + cc + Fore.YELLOW +
              ' ├───► ' + Fore.BLUE + 'IP: ' + ip + Fore.YELLOW +
              ' ├────► ' + Fore.BLUE + 'Country ISO: ' + iso + Fore.YELLOW +
              ' └─────► ' + Fore.BLUE + 'City: ' + city)
        print('')

        # Get site
        site = messenger.writeInput(
            ' Enter Site Name { example : example.com or www.example.com } \n' +
            Fore.BLUE + ' ~$ ', 'white')
        print('')

        # Checking if the website is online and stable
        if OKadminFinder.checkUrl(site, proxies):
            messenger.writeMessage('\n Site %s is stable\n' % site, 'green')
        else:
            messenger.writeMessage(' Something wrong with url', 'red')
            exit(SystemExit)

        # Some additional info about the website
        rh = requests.get('http://' + site, proxies=proxies, headers=OKadminFinder.header)
        di = socket.gethostbyname(site)
        print(Fore.CYAN + Style.BRIGHT + '\tServer: ' + Fore.YELLOW + rh.headers['Server'] +
              '\t\t' + Fore.CYAN + Style.BRIGHT + 'Hostname: ' + Fore.YELLOW + di + '\n')
        try:
            xf = dict(rh.headers).get("x-frame-options")
            xf = str(xf)
            print(Fore.CYAN + Style.BRIGHT + '\tX-Powered-By: ' + Fore.YELLOW +
                  rh.headers['X-Powered-By'] + '\t\t' + Fore.CYAN + Style.BRIGHT +
                  'X-Frame-Options: ' + Fore.YELLOW + xf + '\n\n')
        except KeyError:
            pass

        # Get links for checking
        urls = OKadminFinder.getUrls('LinkFile/adminpanellinks.txt')
        # Counters for total links, and admin panels found
        totalCount = len(urls)
        adminCount = 0
        # Checking all links
        for link in urls:
            # Create test link with params from input and the links file
            reqLink = OKadminFinder.createReqLink(site, link, proxies)
            messenger.writeMessage('\t[#] Checking http://' + reqLink, 'yellow')
            # Test created link for HTTP errors. No error -> potential admin panel
            if OKadminFinder.checkUrl(reqLink, proxies):
                adminCount += 1
                messenger.writeMessage(' %s %s' % ('\n [✔] http://' + reqLink, 'Admin page found!'), 'bright')
                # Stop the process and wait for input before continuing
                messenger.writeInput(' Press enter to continue scanning.\n')
            # If HTTP error, continue testing other links
            else:
                continue

        # Write last information about the scan with counters
        messenger.writeMessage('\n\n Completed \n', 'green')
        messenger.writeMessage(str(adminCount) + ' Admin pages found', 'white')
        messenger.writeMessage(str(totalCount) + ' total pages scanned', 'white')
        messenger.writeInput(' [/] Scanning over; Press Enter to Exit', 'green')
        messenger.writeMessage('', 'white')
    except (KeyboardInterrupt, SystemExit):
        messenger.writeMessage('\n\t[x] Session Cancelled', 'red')
        messenger.writeMessage('', 'white')
    except Exception:
        # FIX: was `except():` -- matched nothing.
        messenger.writeMessage('\n\t[x] Session Cancelled; Unknown error', 'red')
        messenger.writeMessage('', 'white')


if __name__ == '__main__':
    # Updater
    if args.update:
        args.url = False
        args.tor = False
        args.rand = False
        args.proxy = False
        args.verbose = False
        args.interactive = False
        update()
    # interactive
    if args.interactive:
        args.url = False
        args.tor = False
        args.rand = False
        args.proxy = False
        args.verbose = False
        interactive()
    # random user-agent
    if args.rand:
        if args.url is False:
            parser.print_usage()
            quit(0)
        else:
            random_agent()
    # random proxy
    if args.random_proxy:
        if args.url is False:
            parser.print_usage()
            quit(0)
        else:
            # FIX: the original called random_proxy() twice (once with the
            # result discarded), downloading the proxy list twice.
            proxies = random_proxy()
    # tor
    if args.tor:
        if args.url is False:
            parser.print_usage()
            quit(0)
        else:
            tor()
    # proxy
    if args.proxy:
        if args.url is False:
            parser.print_usage()
            quit(0)
        else:
            # FIX: the original called proxy() twice, running the
            # connectivity check twice.
            proxies = proxy()
    # verbose
    if args.verbose:
        if args.url is False:
            parser.print_usage()
            quit(0)
        else:
            vipinf()
            hos()
    # url
    if args.url:
        site = args.url
        url(site)
apache-2.0
Ashaba/rms
rmslocalenv/lib/python2.7/site-packages/django/core/validators.py
32
15447
from __future__ import unicode_literals import re from django.core.exceptions import ValidationError from django.utils import six from django.utils.deconstruct import deconstructible from django.utils.encoding import force_text from django.utils.functional import SimpleLazyObject from django.utils.ipv6 import is_valid_ipv6_address from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit from django.utils.translation import ugettext_lazy as _, ungettext_lazy # These values, if given to validate(), will trigger the self.required check. EMPTY_VALUES = (None, '', [], (), {}) def _lazy_re_compile(regex, flags=0): """Lazily compile a regex with flags.""" def _compile(): # Compile the regex if it was not passed pre-compiled. if isinstance(regex, six.string_types): return re.compile(regex, flags) else: assert not flags, "flags must be empty if regex is passed pre-compiled" return regex return SimpleLazyObject(_compile) @deconstructible class RegexValidator(object): regex = '' message = _('Enter a valid value.') code = 'invalid' inverse_match = False flags = 0 def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None): if regex is not None: self.regex = regex if message is not None: self.message = message if code is not None: self.code = code if inverse_match is not None: self.inverse_match = inverse_match if flags is not None: self.flags = flags if self.flags and not isinstance(self.regex, six.string_types): raise TypeError("If the flags are set, regex must be a regular expression string.") self.regex = _lazy_re_compile(self.regex, self.flags) def __call__(self, value): """ Validates that the input matches the regular expression if inverse_match is False, otherwise raises ValidationError. 
""" if not (self.inverse_match is not bool(self.regex.search( force_text(value)))): raise ValidationError(self.message, code=self.code) def __eq__(self, other): return ( isinstance(other, RegexValidator) and self.regex.pattern == other.regex.pattern and self.regex.flags == other.regex.flags and (self.message == other.message) and (self.code == other.code) and (self.inverse_match == other.inverse_match) ) def __ne__(self, other): return not (self == other) @deconstructible class URLValidator(RegexValidator): ul = '\u00a1-\uffff' # unicode letters range (must be a unicode string, not a raw string) # IP patterns ipv4_re = r'(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)(?:\.(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}' ipv6_re = r'\[[0-9a-f:\.]+\]' # (simple regex, validated later) # Host patterns hostname_re = r'[a-z' + ul + r'0-9](?:[a-z' + ul + r'0-9-]*[a-z' + ul + r'0-9])?' domain_re = r'(?:\.(?!-)[a-z' + ul + r'0-9-]+(?<!-))*' tld_re = ( '\.' # dot '(?!-)' # can't start with a dash '(?:[a-z' + ul + '-]{2,}' # domain label '|xn--[a-z0-9]+)' # or punycode label '(?<!-)' # can't end with a dash '\.?' # may have a trailing dot ) host_re = '(' + hostname_re + domain_re + tld_re + '|localhost)' regex = _lazy_re_compile( r'^(?:[a-z0-9\.\-]*)://' # scheme is validated separately r'(?:\S+(?::\S*)?@)?' # user:pass authentication r'(?:' + ipv4_re + '|' + ipv6_re + '|' + host_re + ')' r'(?::\d{2,5})?' # port r'(?:[/?#][^\s]*)?' 
# resource path r'\Z', re.IGNORECASE) message = _('Enter a valid URL.') schemes = ['http', 'https', 'ftp', 'ftps'] def __init__(self, schemes=None, **kwargs): super(URLValidator, self).__init__(**kwargs) if schemes is not None: self.schemes = schemes def __call__(self, value): value = force_text(value) # Check first if the scheme is valid scheme = value.split('://')[0].lower() if scheme not in self.schemes: raise ValidationError(self.message, code=self.code) # Then check full URL try: super(URLValidator, self).__call__(value) except ValidationError as e: # Trivial case failed. Try for possible IDN domain if value: scheme, netloc, path, query, fragment = urlsplit(value) try: netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE except UnicodeError: # invalid domain part raise e url = urlunsplit((scheme, netloc, path, query, fragment)) super(URLValidator, self).__call__(url) else: raise else: # Now verify IPv6 in the netloc part host_match = re.search(r'^\[(.+)\](?::\d{2,5})?$', urlsplit(value).netloc) if host_match: potential_ip = host_match.groups()[0] try: validate_ipv6_address(potential_ip) except ValidationError: raise ValidationError(self.message, code=self.code) url = value integer_validator = RegexValidator( _lazy_re_compile('^-?\d+\Z'), message=_('Enter a valid integer.'), code='invalid', ) def validate_integer(value): return integer_validator(value) @deconstructible class EmailValidator(object): message = _('Enter a valid email address.') code = 'invalid' user_regex = _lazy_re_compile( r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string re.IGNORECASE) domain_regex = _lazy_re_compile( # max length for domain name labels is 63 characters per RFC 1034 r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z', re.IGNORECASE) literal_regex = _lazy_re_compile( # literal form, ipv4 or ipv6 address (SMTP 4.1.3) 
r'\[([A-f0-9:\.]+)\]\Z', re.IGNORECASE) domain_whitelist = ['localhost'] def __init__(self, message=None, code=None, whitelist=None): if message is not None: self.message = message if code is not None: self.code = code if whitelist is not None: self.domain_whitelist = whitelist def __call__(self, value): value = force_text(value) if not value or '@' not in value: raise ValidationError(self.message, code=self.code) user_part, domain_part = value.rsplit('@', 1) if not self.user_regex.match(user_part): raise ValidationError(self.message, code=self.code) if (domain_part not in self.domain_whitelist and not self.validate_domain_part(domain_part)): # Try for possible IDN domain-part try: domain_part = domain_part.encode('idna').decode('ascii') if self.validate_domain_part(domain_part): return except UnicodeError: pass raise ValidationError(self.message, code=self.code) def validate_domain_part(self, domain_part): if self.domain_regex.match(domain_part): return True literal_match = self.literal_regex.match(domain_part) if literal_match: ip_address = literal_match.group(1) try: validate_ipv46_address(ip_address) return True except ValidationError: pass return False def __eq__(self, other): return ( isinstance(other, EmailValidator) and (self.domain_whitelist == other.domain_whitelist) and (self.message == other.message) and (self.code == other.code) ) validate_email = EmailValidator() slug_re = _lazy_re_compile(r'^[-a-zA-Z0-9_]+\Z') validate_slug = RegexValidator( slug_re, _("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."), 'invalid' ) slug_unicode_re = _lazy_re_compile(r'^[-\w]+\Z', re.U) validate_unicode_slug = RegexValidator( slug_unicode_re, _("Enter a valid 'slug' consisting of Unicode letters, numbers, underscores, or hyphens."), 'invalid' ) ipv4_re = _lazy_re_compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z') validate_ipv4_address = RegexValidator(ipv4_re, _('Enter a valid IPv4 address.'), 'invalid') def 
validate_ipv6_address(value): if not is_valid_ipv6_address(value): raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid') def validate_ipv46_address(value): try: validate_ipv4_address(value) except ValidationError: try: validate_ipv6_address(value) except ValidationError: raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid') ip_address_validator_map = { 'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')), 'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')), 'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')), } def ip_address_validators(protocol, unpack_ipv4): """ Depending on the given parameters returns the appropriate validators for the GenericIPAddressField. This code is here, because it is exactly the same for the model and the form field. """ if protocol != 'both' and unpack_ipv4: raise ValueError( "You can only use `unpack_ipv4` if `protocol` is set to 'both'") try: return ip_address_validator_map[protocol.lower()] except KeyError: raise ValueError("The protocol '%s' is unknown. 
Supported: %s" % (protocol, list(ip_address_validator_map))) def int_list_validator(sep=',', message=None, code='invalid'): regexp = _lazy_re_compile('^\d+(?:%s\d+)*\Z' % re.escape(sep)) return RegexValidator(regexp, message=message, code=code) validate_comma_separated_integer_list = int_list_validator( message=_('Enter only digits separated by commas.'), ) @deconstructible class BaseValidator(object): compare = lambda self, a, b: a is not b clean = lambda self, x: x message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).') code = 'limit_value' def __init__(self, limit_value, message=None): self.limit_value = limit_value if message: self.message = message def __call__(self, value): cleaned = self.clean(value) params = {'limit_value': self.limit_value, 'show_value': cleaned, 'value': value} if self.compare(cleaned, self.limit_value): raise ValidationError(self.message, code=self.code, params=params) def __eq__(self, other): return ( isinstance(other, self.__class__) and (self.limit_value == other.limit_value) and (self.message == other.message) and (self.code == other.code) ) @deconstructible class MaxValueValidator(BaseValidator): compare = lambda self, a, b: a > b message = _('Ensure this value is less than or equal to %(limit_value)s.') code = 'max_value' @deconstructible class MinValueValidator(BaseValidator): compare = lambda self, a, b: a < b message = _('Ensure this value is greater than or equal to %(limit_value)s.') code = 'min_value' @deconstructible class MinLengthValidator(BaseValidator): compare = lambda self, a, b: a < b clean = lambda self, x: len(x) message = ungettext_lazy( 'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).', 'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).', 'limit_value') code = 'min_length' @deconstructible class MaxLengthValidator(BaseValidator): compare = lambda self, a, b: a > b clean = lambda self, x: len(x) message = ungettext_lazy( 'Ensure 
this value has at most %(limit_value)d character (it has %(show_value)d).', 'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).', 'limit_value') code = 'max_length' @deconstructible class DecimalValidator(object): """ Validate that the input does not exceed the maximum number of digits expected, otherwise raise ValidationError. """ messages = { 'max_digits': ungettext_lazy( 'Ensure that there are no more than %(max)s digit in total.', 'Ensure that there are no more than %(max)s digits in total.', 'max' ), 'max_decimal_places': ungettext_lazy( 'Ensure that there are no more than %(max)s decimal place.', 'Ensure that there are no more than %(max)s decimal places.', 'max' ), 'max_whole_digits': ungettext_lazy( 'Ensure that there are no more than %(max)s digit before the decimal point.', 'Ensure that there are no more than %(max)s digits before the decimal point.', 'max' ), } def __init__(self, max_digits, decimal_places): self.max_digits = max_digits self.decimal_places = decimal_places def __call__(self, value): digit_tuple, exponent = value.as_tuple()[1:] decimals = abs(exponent) # digit_tuple doesn't include any leading zeros. digits = len(digit_tuple) if decimals > digits: # We have leading zeros up to or past the decimal point. Count # everything past the decimal point as a digit. We do not count # 0 before the decimal point as a digit since that would mean # we would not allow max_digits = decimal_places. 
digits = decimals whole_digits = digits - decimals if self.max_digits is not None and digits > self.max_digits: raise ValidationError( self.messages['max_digits'], code='max_digits', params={'max': self.max_digits}, ) if self.decimal_places is not None and decimals > self.decimal_places: raise ValidationError( self.messages['max_decimal_places'], code='max_decimal_places', params={'max': self.decimal_places}, ) if (self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places)): raise ValidationError( self.messages['max_whole_digits'], code='max_whole_digits', params={'max': (self.max_digits - self.decimal_places)}, ) def __eq__(self, other): return ( isinstance(other, self.__class__) and self.max_digits == other.max_digits and self.decimal_places == other.decimal_places )
mit
cevaris/pants
src/python/pants/util/dirutil.py
2
12074
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import atexit import errno import os import shutil import stat import tempfile import threading import uuid from collections import defaultdict from contextlib import contextmanager from pants.util.strutil import ensure_text def fast_relpath(path, start): """A prefix-based relpath, with no normalization or support for returning `..`.""" if not path.startswith(start): raise ValueError('{} is not a prefix of {}'.format(start, path)) if len(path) == len(start): # Items are identical: the relative path is empty. return '' elif len(start) == 0: # Empty prefix. return path elif start[-1] == '/': # The prefix indicates that it is a directory. return path[len(start):] elif path[len(start)] == '/': # The suffix indicates that the prefix is a directory. return path[len(start)+1:] else: raise ValueError('{} is not a directory containing {}'.format(start, path)) def safe_mkdir(directory, clean=False): """Ensure a directory is present. If it's not there, create it. If it is, no-op. If clean is True, ensure the dir is empty. :API: public """ if clean: safe_rmtree(directory) try: os.makedirs(directory) except OSError as e: if e.errno != errno.EEXIST: raise def safe_mkdir_for(path): """Ensure that the parent directory for a file is present. If it's not there, create it. If it is, no-op. """ safe_mkdir(os.path.dirname(path), clean=False) def safe_file_dump(filename, payload): """Write a string to a file. :param string filename: The filename of the file to write to. :param string payload: The string to write to the file. """ with safe_open(filename, 'wb') as f: f.write(payload) def read_file(filename): """Read and return the contents of a file in a single file.read(). 
:param string filename: The filename of the file to read. :returns: The contents of the file. :rtype: string """ with open(filename, 'rb') as f: return f.read() def safe_walk(path, **kwargs): """Just like os.walk, but ensures that the returned values are unicode objects. This isn't strictly safe, in that it is possible that some paths will not be decodeable, but that case is rare, and the only alternative is to somehow avoid all interaction between paths and unicode objects, which seems especially tough in the presence of unicode_literals. See e.g. https://mail.python.org/pipermail/python-dev/2008-December/083856.html :API: public """ # If os.walk is given a text argument, it yields text values; if it # is given a binary argument, it yields binary values. return os.walk(ensure_text(path), **kwargs) _MKDTEMP_CLEANER = None _MKDTEMP_DIRS = defaultdict(set) _MKDTEMP_LOCK = threading.RLock() def _mkdtemp_atexit_cleaner(): for td in _MKDTEMP_DIRS.pop(os.getpid(), []): safe_rmtree(td) def _mkdtemp_unregister_cleaner(): global _MKDTEMP_CLEANER _MKDTEMP_CLEANER = None def _mkdtemp_register_cleaner(cleaner): global _MKDTEMP_CLEANER if not cleaner: return assert callable(cleaner) if _MKDTEMP_CLEANER is None: atexit.register(cleaner) _MKDTEMP_CLEANER = cleaner def safe_mkdtemp(cleaner=_mkdtemp_atexit_cleaner, **kw): """Create a temporary directory that is cleaned up on process exit. Arguments are as to tempfile.mkdtemp. :API: public """ # Proper lock sanitation on fork [issue 6721] would be desirable here. with _MKDTEMP_LOCK: return register_rmtree(tempfile.mkdtemp(**kw), cleaner=cleaner) def register_rmtree(directory, cleaner=_mkdtemp_atexit_cleaner): """Register an existing directory to be cleaned up at process exit.""" with _MKDTEMP_LOCK: _mkdtemp_register_cleaner(cleaner) _MKDTEMP_DIRS[os.getpid()].add(directory) return directory def safe_rmtree(directory): """Delete a directory if it's present. If it's not present, no-op. 
Note that if the directory argument is a symlink, only the symlink will be deleted. :API: public """ if os.path.islink(directory): safe_delete(directory) else: shutil.rmtree(directory, ignore_errors=True) def safe_open(filename, *args, **kwargs): """Open a file safely, ensuring that its directory exists. :API: public """ safe_mkdir_for(filename) return open(filename, *args, **kwargs) def safe_delete(filename): """Delete a file safely. If it's not present, no-op.""" try: os.unlink(filename) except OSError as e: if e.errno != errno.ENOENT: raise def safe_concurrent_rename(src, dst): """Rename src to dst, ignoring errors due to dst already existing. Useful when concurrent processes may attempt to create dst, and it doesn't matter who wins. """ # Delete dst, in case it existed (with old content) even before any concurrent processes # attempted this write. This ensures that at least one process writes the new content. if os.path.isdir(src): # Note that dst may not exist, so we test for the type of src. safe_rmtree(dst) else: safe_delete(dst) try: shutil.move(src, dst) except IOError as e: if e.errno != errno.EEXIST: raise def safe_rm_oldest_items_in_dir(root_dir, num_of_items_to_keep, excludes=frozenset()): """ Keep `num_of_items_to_keep` newly modified items besides `excludes` in `root_dir` then remove the rest. 
:param root_dir: the folder to examine :param num_of_items_to_keep: number of files/folders/symlinks to keep after the cleanup :param excludes: absolute paths excluded from removal (must be prefixed with `root_dir`) :return: none """ if os.path.isdir(root_dir): found_files = [] for old_file in os.listdir(root_dir): full_path = os.path.join(root_dir, old_file) if full_path not in excludes: found_files.append((full_path, os.path.getmtime(full_path))) found_files = sorted(found_files, key=lambda x: x[1], reverse=True) for cur_file, _ in found_files[num_of_items_to_keep:]: rm_rf(cur_file) @contextmanager def safe_concurrent_creation(target_path): """A contextmanager that yields a temporary path and renames it to a final target path when the contextmanager exits. Useful when concurrent processes may attempt to create a file, and it doesn't matter who wins. :param target_path: The final target path to rename the temporary path to. :yields: A temporary path containing the original path with a unique (uuid4) suffix. """ safe_mkdir_for(target_path) tmp_path = '{}.tmp.{}'.format(target_path, uuid.uuid4().hex) try: yield tmp_path finally: if os.path.exists(tmp_path): safe_concurrent_rename(tmp_path, target_path) def chmod_plus_x(path): """Equivalent of unix `chmod a+x path`""" path_mode = os.stat(path).st_mode path_mode &= int('777', 8) if path_mode & stat.S_IRUSR: path_mode |= stat.S_IXUSR if path_mode & stat.S_IRGRP: path_mode |= stat.S_IXGRP if path_mode & stat.S_IROTH: path_mode |= stat.S_IXOTH os.chmod(path, path_mode) def absolute_symlink(source_path, target_path): """Create a symlink at target pointing to source using the absolute path. 
:param source_path: Absolute path to source file :param target_path: Absolute path to intended symlink :raises ValueError if source_path or link_path are not unique, absolute paths :raises OSError on failure UNLESS file already exists or no such file/directory """ if not os.path.isabs(source_path): raise ValueError("Path for source : {} must be absolute".format(source_path)) if not os.path.isabs(target_path): raise ValueError("Path for link : {} must be absolute".format(target_path)) if source_path == target_path: raise ValueError("Path for link is identical to source : {}".format(source_path)) try: if os.path.lexists(target_path): if os.path.islink(target_path) or os.path.isfile(target_path): os.unlink(target_path) else: shutil.rmtree(target_path) safe_mkdir_for(target_path) os.symlink(source_path, target_path) except OSError as e: # Another run may beat us to deletion or creation. if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT): raise def relative_symlink(source_path, link_path): """Create a symlink at link_path pointing to relative source :param source_path: Absolute path to source file :param link_path: Absolute path to intended symlink :raises ValueError if source_path or link_path are not unique, absolute paths :raises OSError on failure UNLESS file already exists or no such file/directory """ if not os.path.isabs(source_path): raise ValueError("Path for source:{} must be absolute".format(source_path)) if not os.path.isabs(link_path): raise ValueError("Path for link:{} must be absolute".format(link_path)) if source_path == link_path: raise ValueError("Path for link is identical to source:{}".format(source_path)) try: if os.path.lexists(link_path): os.unlink(link_path) rel_path = os.path.relpath(source_path, os.path.dirname(link_path)) os.symlink(rel_path, link_path) except OSError as e: # Another run may beat us to deletion or creation. 
if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT): raise def relativize_path(path, rootdir): """ :API: public """ # Note that we can't test for length and return the shorter of the two, because we need these # paths to be stable across systems (e.g., because they get embedded in analysis files), # and this choice might be inconsistent across systems. So we assume the relpath is always # shorter. We relativize because of a known case of very long full path prefixes on Mesos, # so this seems like the right heuristic. # Note also that we mustn't call realpath on the path - we need to preserve the symlink structure. return os.path.relpath(path, rootdir) # When running pants under mesos/aurora, the sandbox pathname can be very long. Since it gets # prepended to most components in the classpath (some from ivy, the rest from the build), # in some runs the classpath gets too big and exceeds ARG_MAX. # We prevent this by using paths relative to the current working directory. def relativize_paths(paths, rootdir): return [relativize_path(path, rootdir) for path in paths] def touch(path, times=None): """Equivalent of unix `touch path`. :API: public :path: The file to touch. :times Either a tuple of (atime, mtime) or else a single time to use for both. If not specified both atime and mtime are updated to the current time. """ if times: if len(times) > 2: raise ValueError('times must either be a tuple of (atime, mtime) or else a single time value ' 'to use for both.') if len(times) == 1: times = (times, times) with safe_open(path, 'a'): os.utime(path, times) def get_basedir(path): """Returns the base directory of a path. Examples: get_basedir('foo/bar/baz') --> 'foo' get_basedir('/foo/bar/baz') --> '' get_basedir('foo') --> 'foo' """ return path[:path.index(os.sep)] if os.sep in path else path def rm_rf(name): """Remove a file or a directory similarly to running `rm -rf <name>` in a UNIX shell. :param str name: the name of the file or directory to remove. 
:raises: OSError on error. """ if not os.path.exists(name): return try: # Avoid using safe_rmtree so we can detect failures. shutil.rmtree(name) except OSError as e: if e.errno == errno.ENOTDIR: # 'Not a directory', but a file. Attempt to os.unlink the file, raising OSError on failure. safe_delete(name) elif e.errno != errno.ENOENT: # Pass on 'No such file or directory', otherwise re-raise OSError to surface perm issues etc. raise
apache-2.0
pamfilos/invenio
modules/webaccess/lib/external_authentication_cern.py
25
7730
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """External user authentication for CERN NICE/CRA Invenio.""" __revision__ = \ "$Id$" import httplib import socket import re from invenio.errorlib import register_exception from invenio.external_authentication import ExternalAuth, \ InvenioWebAccessExternalAuthError from invenio.external_authentication_cern_wrapper import AuthCernWrapper # Tunable list of settings to be hidden CFG_EXTERNAL_AUTH_CERN_HIDDEN_SETTINGS = ['auth', 'respccid', 'ccid'] # Tunable list of groups to be hidden CFG_EXTERNAL_AUTH_HIDDEN_GROUPS = ( 'All Exchange People', 'CERN Users', 'cern-computing-postmasters@cern.ch', 'cern-nice2000-postmasters@cern.ch', 'CMF FrontEnd Users', 'CMF_NSC_259_NSU', 'Domain Users', 'GP Apply Favorites Redirection', 'GP Apply NoAdmin', 'info-terminalservices@cern.ch', 'info-terminalservices-members@cern.ch', 'IT Web IT', 'NICE Deny Enforce Password-protected Screensaver', 'NICE Enforce Password-protected Screensaver', 'NICE LightWeight Authentication WS Users', 'NICE MyDocuments Redirection (New)', 'NICE Profile Redirection', 'NICE Terminal Services Users', 'NICE Users', 'NICE VPN Users', ) CFG_EXTERNAL_AUTH_HIDDEN_GROUPS_RE = ( re.compile(r'Users by Letter [A-Z]'), 
re.compile(r'building-[\d]+'), re.compile(r'Users by Home CERNHOME[A-Z]'), ) class ExternalAuthCern(ExternalAuth): """ External authentication example for a custom HTTPS-based authentication service (called "CERN NICE"). """ def __init__(self): """Initialize stuff here""" ExternalAuth.__init__(self) try: self.connection = AuthCernWrapper() except (httplib.CannotSendRequest, socket.error, AttributeError, IOError, TypeError), msg: # Let the user note that # no connection is available register_exception(alert_admin=True) raise InvenioWebAccessExternalAuthError, msg def _try_twice(self, funct, params): """Try twice to execute funct on self.connection passing it params. If for various reason the connection doesn't work it's restarted """ try: ret = funct(self.connection, **params) except (httplib.CannotSendRequest, socket.error, AttributeError, IOError, TypeError): try: self.connection = AuthCernWrapper() ret = funct(self.connection, **params) except (httplib.CannotSendRequest, socket.error, AttributeError, IOError, TypeError): register_exception(alert_admin=True) self.connection = None raise InvenioWebAccessExternalAuthError return ret def auth_user(self, username, password, req=None): """ Check USERNAME and PASSWORD against CERN NICE/CRA database. Return (None, None) if authentication failed, or the (email address, nickname) of the person if the authentication was successful. In order to do this you may perhaps have to keep a translation table between usernames and email addresses. If it is the first time the user logs in Invenio the nickname is stored alongside the email. If this nickname is unfortunatly already in use it is discarded. Otherwise it is ignored. Raise InvenioWebAccessExternalAuthError in case of external troubles. 
""" infos = self._try_twice(funct=AuthCernWrapper.get_user_info, \ params={"user_name":username, "password":password}) if "email" in infos: return infos["email"], infos["login"] else: return None, None def user_exists(self, email, req=None): """Checks against CERN NICE/CRA for existance of email. @return: True if the user exists, False otherwise """ users = self._try_twice(funct=AuthCernWrapper.list_users, \ params={"display_name":email}) return email.upper() in [user['email'].upper() for user in users] def fetch_user_groups_membership(self, email, password=None, req=None): """Fetch user groups membership from the CERN NICE/CRA account. @return: a dictionary of groupname, group description """ groups = self._try_twice(funct=AuthCernWrapper.get_groups_for_user, \ params={"user_name":email}) # Filtering out uncomfortable groups groups = [group for group in groups if group not in CFG_EXTERNAL_AUTH_HIDDEN_GROUPS] for regexp in CFG_EXTERNAL_AUTH_HIDDEN_GROUPS_RE: for group in groups: if regexp.match(group): groups.remove(group) # Produce list of double value: group/mailing list(with stripped # @cern.ch) name, and group/description built from the name. return dict(map(lambda x: (x.find('@') > -1 and x[:x.find('@')] or x, '@' in x and x + ' (CERN Mailing list)' or x + ' (CERN Group)'), groups)) def fetch_user_nickname(self, username, password, req=None): """Given a username and a password, returns the right nickname belonging to that user (username could be an email). """ infos = self._try_twice(funct=AuthCernWrapper.get_user_info, params={"user_name":username, "password":password}) if "login" in infos: return infos["login"] else: return None def fetch_user_preferences(self, username, password=None, req=None): """Fetch user preferences/settings from the CERN Nice account. the external key will be '1' if the account is external to NICE/CRA, otherwise 0 @return: a dictionary. 
Note: auth and respccid are hidden """ prefs = self._try_twice(funct=AuthCernWrapper.get_user_info, \ params={"user_name":username, "password":password}) ret = {} try: if int(prefs['auth']) == 3 \ and (int(prefs['respccid']) > 0 \ or not prefs['email'].endswith('@cern.ch')): ret['external'] = '1' else: ret['external'] = '0' except KeyError: ret['external'] = '1' for key, value in prefs.items(): if key in CFG_EXTERNAL_AUTH_CERN_HIDDEN_SETTINGS: ret['HIDDEN_' + key] = value else: ret[key] = value ## Hack to be forward-compatible with CERN SSO implementation if ret.has_key('company'): ret['homeinstitute'] = ret['company'] del ret['company'] if ret.has_key('name'): ret['fullname'] = ret['name'] del ret['name'] return ret
gpl-2.0
yceruto/django
django/conf/locale/nn/formats.py
197
1810
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j. F Y' TIME_FORMAT = 'H:i' DATETIME_FORMAT = 'j. F Y H:i' YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j. F' SHORT_DATE_FORMAT = 'd.m.Y' SHORT_DATETIME_FORMAT = 'd.m.Y H:i' FIRST_DAY_OF_WEEK = 1 # Monday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior # Kept ISO formats as they are in first position DATE_INPUT_FORMATS = ( '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06' # '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006' # '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006' # '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006' ) DATETIME_INPUT_FORMATS = ( '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%Y-%m-%d', # '2006-10-25' '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59' '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200' '%d.%m.%Y %H:%M', # '25.10.2006 14:30' '%d.%m.%Y', # '25.10.2006' '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59' '%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200' '%d.%m.%y %H:%M', # '25.10.06 14:30' '%d.%m.%y', # '25.10.06' ) DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '\xa0' # non-breaking space NUMBER_GROUPING = 3
bsd-3-clause
cuongthai/cuongthai-s-blog
django/contrib/localflavor/is_/is_postalcodes.py
438
4913
# -*- coding: utf-8 -*-
# Choices of Icelandic postal codes as (value, label) 2-tuples, ordered by
# code. Each label repeats the code itself ("101 Reykjavík") so the tuple
# can be fed directly to a form field's ``choices``. Pure data: no runtime
# behaviour to document beyond the literal entries below.
IS_POSTALCODES = (
    ('101', u'101 Reykjavík'),
    ('103', u'103 Reykjavík'),
    ('104', u'104 Reykjavík'),
    ('105', u'105 Reykjavík'),
    ('107', u'107 Reykjavík'),
    ('108', u'108 Reykjavík'),
    ('109', u'109 Reykjavík'),
    ('110', u'110 Reykjavík'),
    ('111', u'111 Reykjavík'),
    ('112', u'112 Reykjavík'),
    ('113', u'113 Reykjavík'),
    ('116', u'116 Kjalarnes'),
    ('121', u'121 Reykjavík'),
    ('123', u'123 Reykjavík'),
    ('124', u'124 Reykjavík'),
    ('125', u'125 Reykjavík'),
    ('127', u'127 Reykjavík'),
    ('128', u'128 Reykjavík'),
    ('129', u'129 Reykjavík'),
    ('130', u'130 Reykjavík'),
    ('132', u'132 Reykjavík'),
    ('150', u'150 Reykjavík'),
    ('155', u'155 Reykjavík'),
    ('170', u'170 Seltjarnarnes'),
    ('172', u'172 Seltjarnarnes'),
    ('190', u'190 Vogar'),
    ('200', u'200 Kópavogur'),
    ('201', u'201 Kópavogur'),
    ('202', u'202 Kópavogur'),
    ('203', u'203 Kópavogur'),
    ('210', u'210 Garðabær'),
    ('212', u'212 Garðabær'),
    ('220', u'220 Hafnarfjörður'),
    ('221', u'221 Hafnarfjörður'),
    ('222', u'222 Hafnarfjörður'),
    ('225', u'225 Álftanes'),
    ('230', u'230 Reykjanesbær'),
    ('232', u'232 Reykjanesbær'),
    ('233', u'233 Reykjanesbær'),
    ('235', u'235 Keflavíkurflugvöllur'),
    ('240', u'240 Grindavík'),
    ('245', u'245 Sandgerði'),
    ('250', u'250 Garður'),
    ('260', u'260 Reykjanesbær'),
    ('270', u'270 Mosfellsbær'),
    ('300', u'300 Akranes'),
    ('301', u'301 Akranes'),
    ('302', u'302 Akranes'),
    ('310', u'310 Borgarnes'),
    ('311', u'311 Borgarnes'),
    ('320', u'320 Reykholt í Borgarfirði'),
    ('340', u'340 Stykkishólmur'),
    ('345', u'345 Flatey á Breiðafirði'),
    ('350', u'350 Grundarfjörður'),
    ('355', u'355 Ólafsvík'),
    ('356', u'356 Snæfellsbær'),
    ('360', u'360 Hellissandur'),
    ('370', u'370 Búðardalur'),
    ('371', u'371 Búðardalur'),
    ('380', u'380 Reykhólahreppur'),
    ('400', u'400 Ísafjörður'),
    ('401', u'401 Ísafjörður'),
    ('410', u'410 Hnífsdalur'),
    ('415', u'415 Bolungarvík'),
    ('420', u'420 Súðavík'),
    ('425', u'425 Flateyri'),
    ('430', u'430 Suðureyri'),
    ('450', u'450 Patreksfjörður'),
    ('451', u'451 Patreksfjörður'),
    ('460', u'460 Tálknafjörður'),
    ('465', u'465 Bíldudalur'),
    ('470', u'470 Þingeyri'),
    ('471', u'471 Þingeyri'),
    ('500', u'500 Staður'),
    ('510', u'510 Hólmavík'),
    ('512', u'512 Hólmavík'),
    ('520', u'520 Drangsnes'),
    ('522', u'522 Kjörvogur'),
    ('523', u'523 Bær'),
    ('524', u'524 Norðurfjörður'),
    ('530', u'530 Hvammstangi'),
    ('531', u'531 Hvammstangi'),
    ('540', u'540 Blönduós'),
    ('541', u'541 Blönduós'),
    ('545', u'545 Skagaströnd'),
    ('550', u'550 Sauðárkrókur'),
    ('551', u'551 Sauðárkrókur'),
    ('560', u'560 Varmahlíð'),
    ('565', u'565 Hofsós'),
    ('566', u'566 Hofsós'),
    ('570', u'570 Fljót'),
    ('580', u'580 Siglufjörður'),
    ('600', u'600 Akureyri'),
    ('601', u'601 Akureyri'),
    ('602', u'602 Akureyri'),
    ('603', u'603 Akureyri'),
    ('610', u'610 Grenivík'),
    ('611', u'611 Grímsey'),
    ('620', u'620 Dalvík'),
    ('621', u'621 Dalvík'),
    ('625', u'625 Ólafsfjörður'),
    ('630', u'630 Hrísey'),
    ('640', u'640 Húsavík'),
    ('641', u'641 Húsavík'),
    ('645', u'645 Fosshóll'),
    ('650', u'650 Laugar'),
    ('660', u'660 Mývatn'),
    ('670', u'670 Kópasker'),
    ('671', u'671 Kópasker'),
    ('675', u'675 Raufarhöfn'),
    ('680', u'680 Þórshöfn'),
    ('681', u'681 Þórshöfn'),
    ('685', u'685 Bakkafjörður'),
    ('690', u'690 Vopnafjörður'),
    ('700', u'700 Egilsstaðir'),
    ('701', u'701 Egilsstaðir'),
    ('710', u'710 Seyðisfjörður'),
    ('715', u'715 Mjóifjörður'),
    ('720', u'720 Borgarfjörður eystri'),
    ('730', u'730 Reyðarfjörður'),
    ('735', u'735 Eskifjörður'),
    ('740', u'740 Neskaupstaður'),
    ('750', u'750 Fáskrúðsfjörður'),
    ('755', u'755 Stöðvarfjörður'),
    ('760', u'760 Breiðdalsvík'),
    ('765', u'765 Djúpivogur'),
    ('780', u'780 Höfn í Hornafirði'),
    ('781', u'781 Höfn í Hornafirði'),
    ('785', u'785 Öræfi'),
    ('800', u'800 Selfoss'),
    ('801', u'801 Selfoss'),
    ('802', u'802 Selfoss'),
    ('810', u'810 Hveragerði'),
    ('815', u'815 Þorlákshöfn'),
    ('820', u'820 Eyrarbakki'),
    ('825', u'825 Stokkseyri'),
    ('840', u'840 Laugarvatn'),
    ('845', u'845 Flúðir'),
    ('850', u'850 Hella'),
    ('851', u'851 Hella'),
    ('860', u'860 Hvolsvöllur'),
    ('861', u'861 Hvolsvöllur'),
    ('870', u'870 Vík'),
    ('871', u'871 Vík'),
    ('880', u'880 Kirkjubæjarklaustur'),
    ('900', u'900 Vestmannaeyjar'),
    ('902', u'902 Vestmannaeyjar')
)
bsd-3-clause
dikyarga/modularitea
src/opt/pymodularitea/build/lib/modularitea/atom_.py
2
2290
#!/usr/bin/python3
"""Atom -- install/manage a single modularitea package ("atom").

An atom is described by ``atoms/<name>/package.json`` below PREFIX; the
JSON declares the preferred installation source (an Ubuntu apt/PPA
repository or a plain HTTP download) plus the per-source details.
"""
import sys
import json
import os
import subprocess
import apt
import apt_pkg
import apt.progress.text
import apt.progress.base
from urllib.request import urlretrieve
from gi.repository import Gtk

# Must be replaced at release time (original note: "harus diganti saat rilis").
PREFIX = '/home/mnirfan/Projects/modularitea/'

# apt executable
APT_PATH = "/usr/bin/apt"
apt_available = os.path.isfile(APT_PATH)

# Add the user directory to the import path so atom modules can be imported.
# BUG FIX: the original call was sys.path.insert(PREFIX, 0) -- the arguments
# were reversed, which raises TypeError because insert() expects (index, item).
sys.path.insert(0, PREFIX)


class Atom:
    """One installable package, loaded from atoms/<name>/package.json."""

    # Class-level defaults kept for backward compatibility with any code
    # that inspects Atom.name / Atom.data before __init__ has run.
    name = ''
    data = ''

    def __init__(self, atom_name):
        """Load the package description for *atom_name* from its package.json."""
        self.name = atom_name
        with open(PREFIX + 'atoms/' + self.name + '/package.json') as json_file:
            self.data = json.load(json_file)

    def install(self):
        """Install the atom via its declared preferred source."""
        prefered_source = self.data['package']['prefered_source']
        print("prefered source: ", prefered_source)
        if prefered_source == "ubuntu-apt" and apt_available:
            self.install_with_apt()
        elif prefered_source == "http_download":
            # BUG FIX: install_from_download() used to *require* a progress
            # widget, so this call raised TypeError; the parameter now
            # defaults to None (backward compatible).
            self.install_from_download()

    def install_from_download(self, progress=None):
        """Install via plain HTTP download.

        :param progress: optional Gtk progress bar to update during download.

        NOTE(review): as in the original code, only the urlretrieve report
        hook is defined here -- the download itself is never started. TODO
        confirm intended behaviour before wiring up urlretrieve().
        """
        # http://stackoverflow.com/questions/13881092/download-progressbar-for-python-3#13895723
        def reporthook(blocknum, blocksize, totalsize):
            readsofar = blocknum * blocksize
            if progress is None:
                return
            if totalsize > 0:
                # BUG FIX: Gtk.ProgressBar.set_fraction() expects a value in
                # [0.0, 1.0]; the original passed the raw byte count.
                progress.set_fraction(min(readsofar / totalsize, 1.0))
            else:
                # Total size unknown: just indicate activity.
                progress.pulse()

    def install_with_apt(self):
        """Install through apt, first adding the atom's PPA when one is declared."""
        print("install with apt...")
        source = self.data['package']['source']['ubuntu-apt']
        if 'ppa' in source:
            print("adding ppa...")
            p = subprocess.Popen(
                ['apt-add-repository', '--yes', source['ppa']],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            p.communicate()  # wait for completion; output is discarded
            if p.returncode == 0:
                print("ppa added")
                c = apt.Cache()
                c.update()
                c[self.get_package_name()].mark_install()
                # BUG FIX: fetch_progress must be an AcquireProgress
                # *instance*; the original passed the class object itself.
                c.commit(fetch_progress=apt.progress.text.AcquireProgress())
        else:
            c = apt.Cache()
            c[self.get_package_name()].mark_install()
            c.commit()

    def get_package_name(self):
        """Return the apt package name declared for this atom."""
        return self.data['package']['source']['ubuntu-apt']['name']
mit
hogasa/normalizador-amba
setup.py
1
1202
#!/usr/bin/env python
# coding: UTF-8
from __future__ import absolute_import

try:
    # Prefer setuptools; fall back to the stdlib installer when unavailable.
    from setuptools import setup
except ImportError:
    from distutils.core import setup

import usig_normalizador_amba

# The long description shown on PyPI comes straight from the README.
with open('README.rst', 'r') as readme_file:
    readme = readme_file.read()

_CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Natural Language :: Spanish',
    'Operating System :: Unix',
    'Programming Language :: Python :: 2.7',
    'Topic :: Scientific/Engineering :: GIS',
    'Topic :: Software Development :: Libraries :: Python Modules',
]

# All package metadata is sourced from the package itself so version and
# author information live in exactly one place.
_METADATA = dict(
    name=usig_normalizador_amba.__title__,
    version=usig_normalizador_amba.__version__,
    description=usig_normalizador_amba.__description__,
    long_description=readme,
    author=usig_normalizador_amba.__author__,
    author_email=usig_normalizador_amba.__author_email__,
    url='https://github.com/usig/normalizador-amba',
    license=usig_normalizador_amba.__license__,
    packages=['usig_normalizador_amba'],
    keywords='usig gcba gis normalizador direcciones amba',
    platforms=['Unix/Linux'],
    classifiers=_CLASSIFIERS,
)

setup(**_METADATA)
mit
sahana/Turkey
modules/s3db/msg.py
7
91452
# -*- coding: utf-8 -*- """ Sahana Eden Messaging Model @copyright: 2009-2015 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" __all__ = ("S3ChannelModel", "S3MessageModel", "S3MessageAttachmentModel", "S3EmailModel", "S3FacebookModel", "S3MCommonsModel", "S3ParsingModel", "S3RSSModel", "S3SMSModel", "S3SMSOutboundModel", "S3TropoModel", "S3TwilioModel", "S3TwitterModel", "S3TwitterSearchModel", "S3XFormsModel", "S3BaseStationModel", ) from gluon import * from gluon.storage import Storage from ..s3 import * # Compact JSON encoding SEPARATORS = (",", ":") # ============================================================================= class S3ChannelModel(S3Model): """ Messaging Channels - all Inbound & Outbound channels for messages are instances of this super-entity """ names = ("msg_channel", "msg_channel_limit", "msg_channel_status", "msg_channel_id", "msg_channel_enable", "msg_channel_disable", "msg_channel_enable_interactive", "msg_channel_disable_interactive", "msg_channel_onaccept", ) def model(self): T = current.T db = current.db define_table = self.define_table #---------------------------------------------------------------------- # Super entity: msg_channel # channel_types = Storage(msg_email_channel = T("Email (Inbound)"), msg_facebook_channel = T("Facebook"), msg_mcommons_channel = T("Mobile Commons (Inbound)"), msg_rss_channel = T("RSS Feed"), msg_sms_modem_channel = T("SMS Modem"), msg_sms_webapi_channel = T("SMS WebAPI (Outbound)"), msg_sms_smtp_channel = T("SMS via SMTP (Outbound)"), msg_tropo_channel = T("Tropo"), msg_twilio_channel = T("Twilio (Inbound)"), msg_twitter_channel = T("Twitter"), ) tablename = "msg_channel" self.super_entity(tablename, "channel_id", channel_types, Field("name", #label = T("Name"), ), Field("description", #label = T("Description"), ), Field("enabled", "boolean", default = True, #label = T("Enabled?") #represent = s3_yes_no_represent, ), # @ToDo: Indicate whether channel can be used for Inbound or Outbound #Field("inbound", "boolean", # label = T("Inbound?")), #Field("outbound", "boolean", # label = T("Outbound?")), ) # @todo: make 
lazy_table table = db[tablename] table.instance_type.readable = True # Reusable Field channel_id = S3ReusableField("channel_id", "reference %s" % tablename, label = T("Channel"), ondelete = "SET NULL", represent = S3Represent(lookup=tablename), requires = IS_EMPTY_OR( IS_ONE_OF_EMPTY(db, "msg_channel.id")), ) self.add_components(tablename, msg_channel_status = "channel_id", ) # --------------------------------------------------------------------- # Channel Limit # Used to limit the number of emails sent from the system # - works by simply recording an entry for the timestamp to be checked against # # - currently just used by msg.send_email() # tablename = "msg_channel_limit" define_table(tablename, # @ToDo: Make it per-channel #channel_id(), *s3_timestamp()) # --------------------------------------------------------------------- # Channel Status # Used to record errors encountered in the Channel # tablename = "msg_channel_status" define_table(tablename, channel_id(), Field("status", #label = T("Status"), #represent = s3_yes_no_represent, represent = lambda v: v or current.messages["NONE"], ), *s3_meta_fields()) # --------------------------------------------------------------------- return dict(msg_channel_id = channel_id, msg_channel_enable = self.channel_enable, msg_channel_disable = self.channel_disable, msg_channel_enable_interactive = self.channel_enable_interactive, msg_channel_disable_interactive = self.channel_disable_interactive, msg_channel_onaccept = self.channel_onaccept, msg_channel_poll = self.channel_poll, ) # ------------------------------------------------------------------------- @staticmethod def channel_enable(tablename, channel_id): """ Enable a Channel - Schedule a Poll for new messages - Enable all associated Parsers CLI API for shell scripts & to be called by S3Method """ db = current.db s3db = current.s3db table = s3db.table(tablename) record = db(table.channel_id == channel_id).select(table.id, # needed for update_record table.enabled, 
limitby=(0, 1), ).first() if not record.enabled: # Flag it as enabled # Update Instance record.update_record(enabled = True) # Update Super s3db.update_super(table, record) # Enable all Parser tasks on this channel ptable = s3db.msg_parser query = (ptable.channel_id == channel_id) & \ (ptable.deleted == False) parsers = db(query).select(ptable.id) for parser in parsers: s3db.msg_parser_enable(parser.id) # Do we have an existing Task? ttable = db.scheduler_task args = '["%s", %s]' % (tablename, channel_id) query = ((ttable.function_name == "msg_poll") & \ (ttable.args == args) & \ (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"]))) exists = db(query).select(ttable.id, limitby=(0, 1)).first() if exists: return "Channel already enabled" else: current.s3task.schedule_task("msg_poll", args = [tablename, channel_id], period = 300, # seconds timeout = 300, # seconds repeats = 0 # unlimited ) return "Channel enabled" # ------------------------------------------------------------------------- @staticmethod def channel_enable_interactive(r, **attr): """ Enable a Channel - Schedule a Poll for new messages S3Method for interactive requests """ tablename = r.tablename result = current.s3db.msg_channel_enable(tablename, r.record.channel_id) current.session.confirmation = result fn = tablename.split("_", 1)[1] redirect(URL(f=fn)) # ------------------------------------------------------------------------- @staticmethod def channel_disable(tablename, channel_id): """ Disable a Channel - Remove schedule for Polling for new messages - Disable all associated Parsers CLI API for shell scripts & to be called by S3Method """ db = current.db s3db = current.s3db table = s3db.table(tablename) record = db(table.channel_id == channel_id).select(table.id, # needed for update_record table.enabled, limitby=(0, 1), ).first() if record.enabled: # Flag it as disabled # Update Instance record.update_record(enabled = False) # Update Super s3db.update_super(table, record) # Disable all Parser 
tasks on this channel ptable = s3db.msg_parser parsers = db(ptable.channel_id == channel_id).select(ptable.id) for parser in parsers: s3db.msg_parser_disable(parser.id) # Do we have an existing Task? ttable = db.scheduler_task args = '["%s", %s]' % (tablename, channel_id) query = ((ttable.function_name == "msg_poll") & \ (ttable.args == args) & \ (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"]))) exists = db(query).select(ttable.id, limitby=(0, 1)).first() if exists: # Disable all db(query).update(status="STOPPED") return "Channel disabled" else: return "Channel already disabled" # -------------------------------------------------------------------------- @staticmethod def channel_disable_interactive(r, **attr): """ Disable a Channel - Remove schedule for Polling for new messages S3Method for interactive requests """ tablename = r.tablename result = current.s3db.msg_channel_disable(tablename, r.record.channel_id) current.session.confirmation = result fn = tablename.split("_", 1)[1] redirect(URL(f=fn)) # ------------------------------------------------------------------------- @staticmethod def channel_onaccept(form): """ Process the Enabled Flag """ if form.record: # Update form # Process if changed if form.record.enabled and not form.vars.enabled: current.s3db.msg_channel_disable(form.table._tablename, form.vars.channel_id) elif form.vars.enabled and not form.record.enabled: current.s3db.msg_channel_enable(form.table._tablename, form.vars.channel_id) else: # Create form # Process only if enabled if form.vars.enabled: current.s3db.msg_channel_enable(form.table._tablename, form.vars.channel_id) # ------------------------------------------------------------------------- @staticmethod def channel_poll(r, **attr): """ Poll a Channel for new messages S3Method for interactive requests """ tablename = r.tablename current.s3task.async("msg_poll", args=[tablename, r.record.channel_id]) current.session.confirmation = \ current.T("The poll request has been 
submitted, so new messages should appear shortly - refresh to see them") if tablename == "msg_email_channel": fn = "email_inbox" elif tablename == "msg_mcommons_channel": fn = "sms_inbox" elif tablename == "msg_rss_channel": fn = "rss" elif tablename == "msg_twilio_channel": fn = "sms_inbox" elif tablename == "msg_twitter_channel": fn = "twitter_inbox" else: return "Unsupported channel: %s" % tablename redirect(URL(f=fn)) # ============================================================================= class S3MessageModel(S3Model): """ Messages """ names = ("msg_message", "msg_message_id", "msg_message_represent", "msg_outbox", ) def model(self): T = current.T db = current.db UNKNOWN_OPT = current.messages.UNKNOWN_OPT configure = self.configure define_table = self.define_table # Message priority msg_priority_opts = {3 : T("High"), 2 : T("Medium"), 1 : T("Low"), } # --------------------------------------------------------------------- # Message Super Entity - all Inbound & Outbound Messages # message_types = Storage(msg_email = T("Email"), msg_facebook = T("Facebook"), msg_rss = T("RSS"), msg_sms = T("SMS"), msg_twitter = T("Twitter"), msg_twitter_result = T("Twitter Search Results"), ) tablename = "msg_message" self.super_entity(tablename, "message_id", message_types, # Knowing which Channel Incoming Messages # came in on allows correlation to Outbound # messages (campaign_message, deployment_alert, etc) self.msg_channel_id(), s3_datetime(default="now"), Field("body", "text", label = T("Message"), ), Field("from_address", label = T("From"), ), Field("to_address", label = T("To"), ), Field("inbound", "boolean", default = False, label = T("Direction"), represent = lambda direction: \ (direction and [T("In")] or \ [T("Out")])[0], ), ) # @todo: make lazy_table table = db[tablename] table.instance_type.readable = True table.instance_type.writable = True configure(tablename, list_fields = ["instance_type", "from_address", "to_address", "body", "inbound", ], ) # Reusable 
Field message_represent = S3Represent(lookup=tablename, fields=["body"]) message_id = S3ReusableField("message_id", "reference %s" % tablename, ondelete = "RESTRICT", represent = message_represent, requires = IS_EMPTY_OR( IS_ONE_OF_EMPTY(db, "msg_message.id")), ) self.add_components(tablename, msg_attachment = "message_id", deploy_response = "message_id", ) # --------------------------------------------------------------------- # Outbound Messages # # Show only the supported messaging methods MSG_CONTACT_OPTS = current.msg.MSG_CONTACT_OPTS # Maximum number of retries to send a message MAX_SEND_RETRIES = current.deployment_settings.get_msg_max_send_retries() # Valid message outbox statuses MSG_STATUS_OPTS = {1 : T("Unsent"), 2 : T("Sent"), 3 : T("Draft"), 4 : T("Invalid"), 5 : T("Failed"), } opt_msg_status = S3ReusableField("status", "integer", notnull=True, requires = IS_IN_SET(MSG_STATUS_OPTS, zero=None), default = 1, label = T("Status"), represent = lambda opt: \ MSG_STATUS_OPTS.get(opt, UNKNOWN_OPT)) # Outbox - needs to be separate to Message since a single message # sent needs different outbox entries for each recipient tablename = "msg_outbox" define_table(tablename, # FK not instance message_id(), # Person/Group to send the message out to: self.super_link("pe_id", "pr_pentity"), # If set used instead of picking up from pe_id: Field("address"), Field("contact_method", length=32, default = "EMAIL", label = T("Contact Method"), represent = lambda opt: \ MSG_CONTACT_OPTS.get(opt, UNKNOWN_OPT), requires = IS_IN_SET(MSG_CONTACT_OPTS, zero=None), ), opt_msg_status(), # Used to loop through a PE to get it's members Field("system_generated", "boolean", default = False, ), # Give up if we can't send after MAX_RETRIES Field("retries", "integer", default = MAX_SEND_RETRIES, readable = False, writable = False, ), *s3_meta_fields()) configure(tablename, list_fields = ["id", "message_id", "pe_id", "status", ], orderby = "msg_outbox.created_on desc", ) # 
--------------------------------------------------------------------- # Pass names back to global scope (s3.*) return dict(msg_message_id = message_id, msg_message_represent = message_represent, ) # ------------------------------------------------------------------------- @staticmethod def defaults(): """ Return safe defaults in case the model has been deactivated. """ dummy = S3ReusableField("dummy_id", "integer", readable = False, writable = False) return dict(msg_message_id = lambda **attr: dummy("message_id"), ) # ============================================================================= class S3MessageAttachmentModel(S3Model): """ Message Attachments - link table between msg_message & doc_document """ names = ("msg_attachment",) def model(self): # --------------------------------------------------------------------- # tablename = "msg_attachment" self.define_table(tablename, # FK not instance self.msg_message_id(ondelete="CASCADE"), self.doc_document_id(), *s3_meta_fields()) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) return {} # ============================================================================= class S3EmailModel(S3ChannelModel): """ Email InBound Channels Outbound Email is currently handled via deployment_settings InBox/OutBox """ names = ("msg_email_channel", "msg_email", ) def model(self): T = current.T configure = self.configure define_table = self.define_table set_method = self.set_method super_link = self.super_link # --------------------------------------------------------------------- # Email Inbound Channels # tablename = "msg_email_channel" define_table(tablename, # Instance super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("enabled", "boolean", default = True, label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("server"), Field("protocol", requires = IS_IN_SET(["imap", "pop3"], zero=None), ), Field("use_ssl", "boolean"), 
Field("port", "integer"), Field("username"), Field("password", "password", length=64, readable = False, requires = IS_NOT_EMPTY(), ), # Set true to delete messages from the remote # inbox after fetching them. Field("delete_from_server", "boolean"), *s3_meta_fields()) configure(tablename, onaccept = self.msg_channel_onaccept, super_entity = "msg_channel", ) set_method("msg", "email_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "email_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "email_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- # Email Messages: InBox & Outbox # sender = current.deployment_settings.get_mail_sender() tablename = "msg_email" define_table(tablename, # Instance super_link("message_id", "msg_message"), self.msg_channel_id(), s3_datetime(default = "now"), Field("subject", length=78, # RFC 2822 label = T("Subject"), ), Field("body", "text", label = T("Message"), ), Field("from_address", #notnull=True, default = sender, label = T("Sender"), requires = IS_EMAIL(), ), Field("to_address", label = T("To"), requires = IS_EMAIL(), ), Field("raw", "text", label = T("Message Source"), readable = False, writable = False, ), Field("inbound", "boolean", default = False, label = T("Direction"), represent = lambda direction: \ (direction and [T("In")] or [T("Out")])[0], ), *s3_meta_fields()) configure(tablename, orderby = "msg_email.date desc", super_entity = "msg_message", ) # Components self.add_components(tablename, # Used to link to custom tab deploy_response_select_mission: deploy_mission = {"name": "select", "link": "deploy_response", "joinby": "message_id", "key": "mission_id", "autodelete": False, }, ) # --------------------------------------------------------------------- return {} # ============================================================================= class 
S3FacebookModel(S3ChannelModel): """ Facebook Channels InBox/OutBox https://developers.facebook.com/docs/graph-api """ names = ("msg_facebook_channel", "msg_facebook", "msg_facebook_login", ) def model(self): T = current.T configure = self.configure define_table = self.define_table set_method = self.set_method super_link = self.super_link # --------------------------------------------------------------------- # Facebook Channels # tablename = "msg_facebook_channel" define_table(tablename, # Instance super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("enabled", "boolean", default = True, label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("login", "boolean", default = False, label = T("Use for Login?"), represent = s3_yes_no_represent, ), Field("app_id", "bigint", requires = IS_INT_IN_RANGE(0, +1e16) ), Field("app_secret", "password", length=64, readable = False, requires = IS_NOT_EMPTY(), ), # Optional Field("page_id", "bigint", requires = IS_INT_IN_RANGE(0, +1e16) ), Field("page_access_token"), *s3_meta_fields()) configure(tablename, onaccept = self.msg_facebook_channel_onaccept, super_entity = "msg_channel", ) set_method("msg", "facebook_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "facebook_channel", method = "disable", action = self.msg_channel_disable_interactive) #set_method("msg", "facebook_channel", # method = "poll", # action = self.msg_channel_poll) # --------------------------------------------------------------------- # Facebook Messages: InBox & Outbox # tablename = "msg_facebook" define_table(tablename, # Instance super_link("message_id", "msg_message"), self.msg_channel_id(), s3_datetime(default = "now"), Field("body", "text", label = T("Message"), ), # @ToDo: Are from_address / to_address relevant in Facebook? 
Field("from_address", #notnull=True, #default = sender, label = T("Sender"), ), Field("to_address", label = T("To"), ), Field("inbound", "boolean", default = False, label = T("Direction"), represent = lambda direction: \ (direction and [T("In")] or [T("Out")])[0], ), *s3_meta_fields()) configure(tablename, orderby = "msg_facebook.date desc", super_entity = "msg_message", ) # --------------------------------------------------------------------- return dict(msg_facebook_login = self.msg_facebook_login, ) # ------------------------------------------------------------------------- @staticmethod def defaults(): """ Safe defaults for model-global names if module is disabled """ return dict(msg_facebook_login = lambda: False, ) # ------------------------------------------------------------------------- @staticmethod def msg_facebook_channel_onaccept(form): if form.vars.login: # Ensure only a single account used for Login current.db(current.s3db.msg_facebook_channel.id != form.vars.id).update(login = False) # Normal onaccept processing S3ChannelModel.channel_onaccept(form) # ------------------------------------------------------------------------- @staticmethod def msg_facebook_login(): table = current.s3db.msg_facebook_channel query = (table.login == True) & \ (table.deleted == False) c = current.db(query).select(table.app_id, table.app_secret, limitby=(0, 1) ).first() return c # ============================================================================= class S3MCommonsModel(S3ChannelModel): """ Mobile Commons Inbound SMS Settings - Outbound can use Web API """ names = ("msg_mcommons_channel",) def model(self): #T = current.T define_table = self.define_table set_method = self.set_method # --------------------------------------------------------------------- tablename = "msg_mcommons_channel" define_table(tablename, self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("enabled", "boolean", default = True, #label = T("Enabled?"), 
represent = s3_yes_no_represent, ), Field("campaign_id", length=128, unique=True, requires = IS_NOT_EMPTY(), ), Field("url", default = \ "https://secure.mcommons.com/api/messages", requires = IS_URL() ), Field("username", requires = IS_NOT_EMPTY(), ), Field("password", "password", readable = False, requires = IS_NOT_EMPTY(), ), Field("query"), Field("timestmp", "datetime", writable = False, ), *s3_meta_fields()) self.configure(tablename, onaccept = self.msg_channel_onaccept, super_entity = "msg_channel", ) set_method("msg", "mcommons_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "mcommons_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "mcommons_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- return {} # ============================================================================= class S3ParsingModel(S3Model): """ Message Parsing Model """ names = ("msg_parser", "msg_parsing_status", "msg_session", "msg_keyword", "msg_sender", "msg_parser_enabled", "msg_parser_enable", "msg_parser_disable", "msg_parser_enable_interactive", "msg_parser_disable_interactive", ) def model(self): T = current.T define_table = self.define_table set_method = self.set_method channel_id = self.msg_channel_id message_id = self.msg_message_id # --------------------------------------------------------------------- # Link between Message Channels and Parsers in parser.py # tablename = "msg_parser" define_table(tablename, # Source channel_id(ondelete = "CASCADE"), Field("function_name", label = T("Parser"), ), Field("enabled", "boolean", default = True, label = T("Enabled?"), represent = s3_yes_no_represent, ), *s3_meta_fields()) self.configure(tablename, onaccept = self.msg_parser_onaccept, ) set_method("msg", "parser", method = "enable", action = self.parser_enable_interactive) set_method("msg", "parser", method = 
"disable", action = self.parser_disable_interactive) set_method("msg", "parser", method = "parse", action = self.parser_parse) # --------------------------------------------------------------------- # Message parsing status # - component to core msg_message table # tablename = "msg_parsing_status" define_table(tablename, # Component, not Instance message_id(ondelete = "CASCADE"), # Source channel_id(ondelete = "CASCADE"), Field("is_parsed", "boolean", default = False, label = T("Parsing Status"), represent = lambda parsed: \ (parsed and [T("Parsed")] or \ [T("Not Parsed")])[0], ), message_id("reply_id", label = T("Reply"), ondelete = "CASCADE", ), *s3_meta_fields()) # --------------------------------------------------------------------- # Login sessions for Message Parsing # - links a from_address with a login until expiry # tablename = "msg_session" define_table(tablename, Field("from_address"), Field("email"), Field("created_datetime", "datetime", default = current.request.utcnow, ), Field("expiration_time", "integer"), Field("is_expired", "boolean", default = False, ), *s3_meta_fields()) # --------------------------------------------------------------------- # Keywords for Message Parsing # tablename = "msg_keyword" define_table(tablename, Field("keyword", label = T("Keyword"), ), # @ToDo: Move this to a link table self.event_incident_type_id(), *s3_meta_fields()) # --------------------------------------------------------------------- # Senders for Message Parsing # - whitelist / blacklist / prioritise # tablename = "msg_sender" define_table(tablename, Field("sender", label = T("Sender"), ), # @ToDo: Make pe_id work for this #self.super_link("pe_id", "pr_pentity"), Field("priority", "integer", label = T("Priority"), ), *s3_meta_fields()) # --------------------------------------------------------------------- return dict(msg_parser_enabled = self.parser_enabled, msg_parser_enable = self.parser_enable, msg_parser_disable = self.parser_disable, ) # 
----------------------------------------------------------------------------- @staticmethod def parser_parse(r, **attr): """ Parse unparsed messages S3Method for interactive requests """ record = r.record current.s3task.async("msg_parse", args=[record.channel_id, record.function_name]) current.session.confirmation = \ current.T("The parse request has been submitted") redirect(URL(f="parser")) # ------------------------------------------------------------------------- @staticmethod def parser_enabled(channel_id): """ Helper function to see if there is a Parser connected to a Channel - used to determine whether to populate the msg_parsing_status table """ table = current.s3db.msg_parser record = current.db(table.channel_id == channel_id).select(table.enabled, limitby=(0, 1), ).first() if record and record.enabled: return True else: return False # ------------------------------------------------------------------------- @staticmethod def parser_enable(id): """ Enable a Parser - Connect a Parser to a Channel CLI API for shell scripts & to be called by S3Method @ToDo: Ensure only 1 Parser is connected to any Channel at a time """ db = current.db s3db = current.s3db table = s3db.msg_parser record = db(table.id == id).select(table.id, # needed for update_record table.enabled, table.channel_id, table.function_name, limitby=(0, 1), ).first() if not record.enabled: # Flag it as enabled record.update_record(enabled = True) channel_id = record.channel_id function_name = record.function_name # Do we have an existing Task? 
ttable = db.scheduler_task args = '[%s, "%s"]' % (channel_id, function_name) query = ((ttable.function_name == "msg_parse") & \ (ttable.args == args) & \ (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"]))) exists = db(query).select(ttable.id, limitby=(0, 1)).first() if exists: return "Parser already enabled" else: current.s3task.schedule_task("msg_parse", args = [channel_id, function_name], period = 300, # seconds timeout = 300, # seconds repeats = 0 # unlimited ) return "Parser enabled" # ------------------------------------------------------------------------- @staticmethod def parser_enable_interactive(r, **attr): """ Enable a Parser - Connect a Parser to a Channel S3Method for interactive requests """ result = current.s3db.msg_parser_enable(r.id) current.session.confirmation = result redirect(URL(f="parser")) # ------------------------------------------------------------------------- @staticmethod def parser_disable(id): """ Disable a Parser - Disconnect a Parser from a Channel CLI API for shell scripts & to be called by S3Method """ db = current.db s3db = current.s3db table = s3db.msg_parser record = db(table.id == id).select(table.id, # needed for update_record table.enabled, table.channel_id, table.function_name, limitby=(0, 1), ).first() if record.enabled: # Flag it as disabled record.update_record(enabled = False) # Do we have an existing Task? 
ttable = db.scheduler_task args = '[%s, "%s"]' % (record.channel_id, record.function_name) query = ((ttable.function_name == "msg_parse") & \ (ttable.args == args) & \ (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"]))) exists = db(query).select(ttable.id, limitby=(0, 1)).first() if exists: # Disable all db(query).update(status="STOPPED") return "Parser disabled" else: return "Parser already disabled" # ------------------------------------------------------------------------- @staticmethod def parser_disable_interactive(r, **attr): """ Disable a Parser - Disconnect a Parser from a Channel S3Method for interactive requests """ result = current.s3db.msg_parser_disable(r.id) current.session.confirmation = result redirect(URL(f="parser")) # ------------------------------------------------------------------------- @staticmethod def msg_parser_onaccept(form): """ Process the Enabled Flag """ if form.record: # Update form # process of changed if form.record.enabled and not form.vars.enabled: current.s3db.msg_parser_disable(form.vars.id) elif form.vars.enabled and not form.record.enabled: current.s3db.msg_parser_enable(form.vars.id) else: # Create form # Process only if enabled if form.vars.enabled: current.s3db.msg_parser_enable(form.vars.id) # ============================================================================= class S3RSSModel(S3ChannelModel): """ RSS channel """ names = ("msg_rss_channel", "msg_rss", ) def model(self): T = current.T define_table = self.define_table set_method = self.set_method super_link = self.super_link # --------------------------------------------------------------------- # RSS Settings for an account # tablename = "msg_rss_channel" define_table(tablename, # Instance super_link("channel_id", "msg_channel"), Field("name", length=255, unique=True, label = T("Name"), ), Field("description", label = T("Description"), ), Field("enabled", "boolean", default = True, label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("url", 
label = T("URL"), requires = IS_URL(), ), s3_datetime(label = T("Last Polled"), writable = False, ), Field("etag", label = T("ETag"), writable = False ), *s3_meta_fields()) self.configure(tablename, list_fields = ["name", "description", "enabled", "url", "date", "channel_status.status", ], onaccept = self.msg_channel_onaccept, super_entity = "msg_channel", ) set_method("msg", "rss_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "rss_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "rss_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- # RSS Feed Posts # tablename = "msg_rss" define_table(tablename, # Instance super_link("message_id", "msg_message"), self.msg_channel_id(), s3_datetime(default="now", label = T("Published on"), ), Field("title", label = T("Title"), ), Field("body", "text", label = T("Content"), ), Field("from_address", label = T("Link"), ), # http://pythonhosted.org/feedparser/reference-feed-author_detail.html Field("author", label = T("Author"), ), # http://pythonhosted.org/feedparser/reference-entry-tags.html Field("tags", "list:string", label = T("Tags"), ), self.gis_location_id(), # Just present for Super Entity Field("inbound", "boolean", default = True, readable = False, writable = False, ), *s3_meta_fields()) self.configure(tablename, deduplicate = self.msg_rss_duplicate, list_fields = ["channel_id", "title", "from_address", "date", "body" ], super_entity = current.s3db.msg_message, ) # --------------------------------------------------------------------- return {} # --------------------------------------------------------------------- @staticmethod def msg_rss_duplicate(item): """ Import item deduplication, match by link (from_address) @param item: the S3ImportItem instance """ from_address = item.data.get("from_address") table = item.table query = (table.from_address == 
from_address) duplicate = current.db(query).select(table.id, limitby=(0, 1)).first() if duplicate: item.id = duplicate.id item.method = item.METHOD.UPDATE # ============================================================================= class S3SMSModel(S3Model): """ SMS: Short Message Service These can be received through a number of different gateways - MCommons - Modem (@ToDo: Restore this) - Tropo - Twilio """ names = ("msg_sms",) def model(self): #T = current.T user = current.auth.user if user and user.organisation_id: # SMS Messages need to be tagged to their org so that they can be sent through the correct gateway default = user.organisation_id else: default = None # --------------------------------------------------------------------- # SMS Messages: InBox & Outbox # tablename = "msg_sms" self.define_table(tablename, # Instance self.super_link("message_id", "msg_message"), self.msg_channel_id(), self.org_organisation_id(default = default), s3_datetime(default="now"), Field("body", "text", # Allow multi-part SMS #length = 160, #label = T("Message"), ), Field("from_address", #label = T("Sender"), ), Field("to_address", #label = T("To"), ), Field("inbound", "boolean", default = False, #represent = lambda direction: \ # (direction and [T("In")] or \ # [T("Out")])[0], #label = T("Direction")), ), # Used e.g. 
for Clickatell Field("remote_id", #label = T("Remote ID"), ), *s3_meta_fields()) self.configure(tablename, super_entity = "msg_message", ) # --------------------------------------------------------------------- return {} # ============================================================================= class S3SMSOutboundModel(S3Model): """ SMS: Short Message Service - Outbound Channels These can be sent through a number of different gateways - Modem - SMTP - Tropo - Web API (inc Clickatell, MCommons, mVaayoo) """ names = ("msg_sms_outbound_gateway", "msg_sms_modem_channel", "msg_sms_smtp_channel", "msg_sms_webapi_channel", ) def model(self): #T = current.T configure = self.configure define_table = self.define_table settings = current.deployment_settings # --------------------------------------------------------------------- # SMS Outbound Gateway # - select which gateway is in active use for which Organisation/Branch # country_code = settings.get_L10n_default_country_code() tablename = "msg_sms_outbound_gateway" define_table(tablename, self.msg_channel_id( requires = IS_ONE_OF(current.db, "msg_channel.channel_id", S3Represent(lookup="msg_channel"), instance_types = ("msg_sms_modem_channel", "msg_sms_webapi_channel", "msg_sms_smtp_channel", ), sort = True, ), ), #Field("outgoing_sms_handler", length=32, # requires = IS_IN_SET(current.msg.GATEWAY_OPTS, # zero = None), # ), # Allow selection of different gateways based on Organisation/Branch self.org_organisation_id(), # @ToDo: Allow selection of different gateways based on destination Location #self.gis_location_id(), Field("default_country_code", "integer", default = country_code, ), *s3_meta_fields()) # --------------------------------------------------------------------- # SMS Modem Channel # tablename = "msg_sms_modem_channel" define_table(tablename, self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("modem_port"), Field("modem_baud", "integer", default = 115200, ), 
Field("enabled", "boolean", default = True, ), Field("max_length", "integer", default = 160, ), *s3_meta_fields()) configure(tablename, super_entity = "msg_channel", ) # --------------------------------------------------------------------- # SMS via SMTP Channel # tablename = "msg_sms_smtp_channel" define_table(tablename, self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("address", length=64, requires = IS_NOT_EMPTY(), ), Field("subject", length=64), Field("enabled", "boolean", default = True, ), Field("max_length", "integer", default = 160, ), *s3_meta_fields()) configure(tablename, super_entity = "msg_channel", ) # --------------------------------------------------------------------- # Settings for Web API services # # @ToDo: Simplified dropdown of services which prepopulates entries & provides nice prompts for the config options # + Advanced mode for raw access to real fields # # https://www.twilio.com/docs/api/rest/sending-messages # tablename = "msg_sms_webapi_channel" define_table(tablename, self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("url", #default = "http://sms1.cardboardfish.com:9001/HTTPSMS?", # Cardboardfish default = "https://api.clickatell.com/http/sendmsg", # Clickatell #default = "https://secure.mcommons.com/api/send_message", # Mobile Commons #default = "https://www.textmagic.com/app/api", # Text Magic #default = "https://api.twilio.com/2010-04-01/Accounts/{AccountSid}/Messages", # Twilio (Untested) requires = IS_URL(), ), Field("parameters", #default = "S=H&UN=yourusername&P=yourpassword&SA=Sahana", # Cardboardfish default = "user=yourusername&password=yourpassword&api_id=yourapiid", # Clickatell #default = "campaign_id=yourid", # Mobile Commons #default = "username=yourusername&password=yourpassword&cmd=send&unicode=1", # Text Magic #default = "From={RegisteredTelNumber}", # Twilio (Untested) ), Field("message_variable", "string", #default = "M", # Cardboardfish 
default = "text", # Clickatell, Text Magic #default = "body", # Mobile Commons #default = "Body", # Twilio (Untested) requires = IS_NOT_EMPTY(), ), Field("to_variable", "string", #default = "DA", # Cardboardfish default = "to", # Clickatell #default = "phone_number", # Mobile Commons #default = "phone", # Text Magic #default = "To", # Twilio (Untested) requires = IS_NOT_EMPTY(), ), Field("max_length", "integer", default = 480, # Clickatell concat 3 ), # If using HTTP Auth (e.g. Mobile Commons) Field("username"), Field("password"), Field("enabled", "boolean", default = True, ), *s3_meta_fields()) configure(tablename, super_entity = "msg_channel", ) # --------------------------------------------------------------------- return {} # ============================================================================= class S3TropoModel(S3Model): """ Tropo can be used to send & receive SMS, Twitter & XMPP https://www.tropo.com """ names = ("msg_tropo_channel", "msg_tropo_scratch", ) def model(self): #T = current.T define_table = self.define_table set_method = self.set_method # --------------------------------------------------------------------- # Tropo Channels # tablename = "msg_tropo_channel" define_table(tablename, self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("enabled", "boolean", default = True, #label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("token_messaging"), #Field("token_voice"), *s3_meta_fields()) self.configure(tablename, super_entity = "msg_channel", ) set_method("msg", "tropo_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "tropo_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "tropo_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- # Tropo Scratch pad for outbound messaging # tablename = "msg_tropo_scratch" define_table(tablename, 
Field("row_id", "integer"), Field("message_id", "integer"), Field("recipient"), Field("message"), Field("network"), ) # --------------------------------------------------------------------- return {} # ============================================================================= class S3TwilioModel(S3ChannelModel): """ Twilio Inbound SMS channel - for Outbound, use Web API """ names = ("msg_twilio_channel", "msg_twilio_sid", ) def model(self): #T = current.T define_table = self.define_table set_method = self.set_method # --------------------------------------------------------------------- # Twilio Channels # tablename = "msg_twilio_channel" define_table(tablename, # Instance self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("enabled", "boolean", default = True, #label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("account_name", length=255, unique=True), Field("url", default = \ "https://api.twilio.com/2010-04-01/Accounts" ), Field("account_sid", length=64, requires = IS_NOT_EMPTY(), ), Field("auth_token", "password", length=64, readable = False, requires = IS_NOT_EMPTY(), ), *s3_meta_fields()) self.configure(tablename, onaccept = self.msg_channel_onaccept, super_entity = "msg_channel", ) set_method("msg", "twilio_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "twilio_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "twilio_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- # Twilio Message extensions # - store message sid to know which ones we've already downloaded # tablename = "msg_twilio_sid" define_table(tablename, # Component not Instance self.msg_message_id(ondelete = "CASCADE"), Field("sid"), *s3_meta_fields()) # --------------------------------------------------------------------- return {} # 
============================================================================= class S3TwitterModel(S3Model): names = ("msg_twitter_channel", "msg_twitter", ) def model(self): T = current.T db = current.db configure = self.configure define_table = self.define_table set_method = self.set_method # --------------------------------------------------------------------- # Twitter Channel # password_widget = S3PasswordWidget() tablename = "msg_twitter_channel" define_table(tablename, #Instance self.super_link("channel_id", "msg_channel"), # @ToDo: Allow different Twitter accounts for different PEs (Orgs / Teams) #self.pr_pe_id(), Field("name", label = T("Name"), ), Field("description", label = T("Description"), ), Field("enabled", "boolean", default = True, label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("login", "boolean", default = False, label = T("Use for Login?"), represent = s3_yes_no_represent, ), Field("twitter_account", label = T("Twitter Account"), ), # Get these from https://apps.twitter.com Field("consumer_key", "password", label = T("Consumer Key"), widget = password_widget, ), Field("consumer_secret", "password", label = T("Consumer Secret"), widget = password_widget, ), Field("access_token", "password", label = T("Access Token"), widget = password_widget, ), Field("access_token_secret", "password", label = T("Access Token Secret"), widget = password_widget, ), *s3_meta_fields()) configure(tablename, onaccept = self.twitter_channel_onaccept, #onvalidation = self.twitter_channel_onvalidation super_entity = "msg_channel", ) set_method("msg", "twitter_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "twitter_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "twitter_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- # Twitter Messages: InBox & Outbox # tablename = "msg_twitter" 
define_table(tablename, # Instance self.super_link("message_id", "msg_message"), self.msg_channel_id(), s3_datetime(default = "now", label = T("Posted on"), ), Field("body", length=140, label = T("Message"), ), Field("from_address", #notnull=True, label = T("From"), represent = self.twitter_represent, requires = IS_NOT_EMPTY(), ), Field("to_address", label = T("To"), represent = self.twitter_represent, ), Field("inbound", "boolean", default = False, label = T("Direction"), represent = lambda direction: \ (direction and [T("In")] or \ [T("Out")])[0], ), Field("msg_id", # Twitter Message ID readable = False, writable = False, ), *s3_meta_fields()) configure(tablename, list_fields = ["id", #"priority", #"category", "body", "from_address", "date", #"location_id", ], #orderby = ~table.priority, super_entity = "msg_message", ) # --------------------------------------------------------------------- return {} # ------------------------------------------------------------------------- @staticmethod def twitter_represent(nickname, show_link=True): """ Represent a Twitter account """ if not nickname: return current.messages["NONE"] db = current.db s3db = current.s3db table = s3db.pr_contact query = (table.contact_method == "TWITTER") & \ (table.value == nickname) row = db(query).select(table.pe_id, limitby=(0, 1)).first() if row: repr = s3db.pr_pentity_represent(row.pe_id) if show_link: # Assume person ptable = s3db.pr_person row = db(ptable.pe_id == row.pe_id).select(ptable.id, limitby=(0, 1)).first() if row: link = URL(c="pr", f="person", args=[row.id]) return A(repr, _href=link) return repr else: return nickname # ------------------------------------------------------------------------- @staticmethod def twitter_channel_onaccept(form): if form.vars.login: # Ensure only a single account used for Login current.db(current.s3db.msg_twitter_channel.id != form.vars.id).update(login = False) # Normal onaccept processing S3ChannelModel.channel_onaccept(form) # 
------------------------------------------------------------------------- @staticmethod def twitter_channel_onvalidation(form): """ Complete oauth: take tokens from session + pin from form, and do the 2nd API call to Twitter """ T = current.T session = current.session settings = current.deployment_settings.msg s3 = session.s3 form_vars = form.vars if form_vars.pin and s3.twitter_request_key and s3.twitter_request_secret: try: import tweepy except: raise HTTP(501, body=T("Can't import tweepy")) oauth = tweepy.OAuthHandler(settings.twitter_oauth_consumer_key, settings.twitter_oauth_consumer_secret) oauth.set_request_token(s3.twitter_request_key, s3.twitter_request_secret) try: oauth.get_access_token(form_vars.pin) form_vars.oauth_key = oauth.access_token.key form_vars.oauth_secret = oauth.access_token.secret twitter = tweepy.API(oauth) form_vars.twitter_account = twitter.me().screen_name form_vars.pin = "" # we won't need it anymore return except tweepy.TweepError: session.error = T("Settings were reset because authenticating with Twitter failed") # Either user asked to reset, or error - clear everything for k in ["oauth_key", "oauth_secret", "twitter_account"]: form_vars[k] = None for k in ["twitter_request_key", "twitter_request_secret"]: s3[k] = "" # ============================================================================= class S3TwitterSearchModel(S3ChannelModel): """ Twitter Searches - results can be fed to KeyGraph https://dev.twitter.com/docs/api/1.1/get/search/tweets """ names = ("msg_twitter_search", "msg_twitter_result", ) def model(self): T = current.T db = current.db configure = self.configure define_table = self.define_table set_method = self.set_method # --------------------------------------------------------------------- # Twitter Search Query # tablename = "msg_twitter_search" define_table(tablename, Field("keywords", "text", label = T("Keywords"), ), # @ToDo: Allow setting a Point & Radius for filtering by geocode #self.gis_location_id(), 
Field("lang", # Set in controller #default = current.response.s3.language, label = T("Language"), ), Field("count", "integer", default = 100, label = T("# Results per query"), ), Field("include_entities", "boolean", default = False, label = T("Include Entity Information?"), represent = s3_yes_no_represent, comment = DIV(_class="tooltip", _title="%s|%s" % (T("Entity Information"), T("This is required if analyzing with KeyGraph."))), ), # @ToDo: Rename or even move to Component Table Field("is_processed", "boolean", default = False, label = T("Processed with KeyGraph?"), represent = s3_yes_no_represent, ), Field("is_searched", "boolean", default = False, label = T("Searched?"), represent = s3_yes_no_represent, ), *s3_meta_fields()) configure(tablename, list_fields = ["keywords", "lang", "count", #"include_entities", ], ) # Reusable Query ID represent = S3Represent(lookup=tablename, fields=["keywords"]) search_id = S3ReusableField("search_id", "reference %s" % tablename, label = T("Search Query"), ondelete = "CASCADE", represent = represent, requires = IS_EMPTY_OR( IS_ONE_OF_EMPTY(db, "msg_twitter_search.id") ), ) set_method("msg", "twitter_search", method = "poll", action = self.twitter_search_poll) set_method("msg", "twitter_search", method = "keygraph", action = self.twitter_keygraph) set_method("msg", "twitter_result", method = "timeline", action = self.twitter_timeline) # --------------------------------------------------------------------- # Twitter Search Results # # @ToDo: Store the places mentioned in the Tweet as linked Locations # tablename = "msg_twitter_result" define_table(tablename, # Instance self.super_link("message_id", "msg_message"), # Just present for Super Entity #self.msg_channel_id(), search_id(), s3_datetime(default="now", label = T("Tweeted on"), ), Field("tweet_id", label = T("Tweet ID")), Field("lang", label = T("Language")), Field("from_address", label = T("Tweeted by")), Field("body", label = T("Tweet")), # @ToDo: Populate from Parser 
#Field("category", # writable = False, # label = T("Category"), # ), #Field("priority", "integer", # writable = False, # label = T("Priority"), # ), self.gis_location_id(), # Just present for Super Entity #Field("inbound", "boolean", # default = True, # readable = False, # writable = False, # ), *s3_meta_fields()) configure(tablename, list_fields = [#"category", #"priority", "body", "from_address", "date", "location_id", ], #orderby=~table.priority, super_entity = "msg_message", ) # --------------------------------------------------------------------- return {} # ----------------------------------------------------------------------------- @staticmethod def twitter_search_poll(r, **attr): """ Perform a Search of Twitter S3Method for interactive requests """ id = r.id tablename = r.tablename current.s3task.async("msg_twitter_search", args=[id]) current.session.confirmation = \ current.T("The search request has been submitted, so new messages should appear shortly - refresh to see them") # Filter results to this Search redirect(URL(f="twitter_result", vars={"~.search_id": id})) # ----------------------------------------------------------------------------- @staticmethod def twitter_keygraph(r, **attr): """ Prcoess Search Results with KeyGraph S3Method for interactive requests """ tablename = r.tablename current.s3task.async("msg_process_keygraph", args=[r.id]) current.session.confirmation = \ current.T("The search results are now being processed with KeyGraph") # @ToDo: Link to KeyGraph results redirect(URL(f="twitter_result")) # ============================================================================= @staticmethod def twitter_timeline(r, **attr): """ Display the Tweets on a Simile Timeline http://www.simile-widgets.org/wiki/Reference_Documentation_for_Timeline """ if r.representation == "html" and r.name == "twitter_result": response = current.response s3 = response.s3 appname = r.application # Add core Simile Code 
s3.scripts.append("/%s/static/scripts/simile/timeline/timeline-api.js" % appname) # Add our control script if s3.debug: s3.scripts.append("/%s/static/scripts/S3/s3.timeline.js" % appname) else: s3.scripts.append("/%s/static/scripts/S3/s3.timeline.min.js" % appname) # Add our data # @ToDo: Make this the initial data & then collect extra via REST with a stylesheet # add in JS using S3.timeline.eventSource.addMany(events) where events is a [] if r.record: # Single record rows = [r.record] else: # Multiple records # @ToDo: Load all records & sort to closest in time # http://stackoverflow.com/questions/7327689/how-to-generate-a-sequence-of-future-datetimes-in-python-and-determine-nearest-d rows = r.resource.select(["date", "body"], limit=2000, as_rows=True) data = {"dateTimeFormat": "iso8601", } now = r.utcnow tl_start = tl_end = now events = [] import re for row in rows: # Dates start = row.date or "" if start: if start < tl_start: tl_start = start if start > tl_end: tl_end = start start = start.isoformat() title = (re.sub(r"(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)|RT", "", row.body)) if len(title) > 30: title = title[:30] events.append({"start": start, "title": title, "description": row.body, }) data["events"] = events data = json.dumps(data, separators=SEPARATORS) code = "".join(( '''S3.timeline.data=''', data, ''' S3.timeline.tl_start="''', tl_start.isoformat(), '''" S3.timeline.tl_end="''', tl_end.isoformat(), '''" S3.timeline.now="''', now.isoformat(), '''" ''')) # Control our code in static/scripts/S3/s3.timeline.js s3.js_global.append(code) # Create the DIV item = DIV(_id="s3timeline", _class="s3-timeline") output = dict(item=item) # Maintain RHeader for consistency if attr.get("rheader"): rheader = attr["rheader"](r) if rheader: output["rheader"] = rheader output["title"] = current.T("Twitter Timeline") response.view = "timeline.html" return output else: r.error(405, current.ERROR.BAD_METHOD) # 
============================================================================= class S3XFormsModel(S3Model): """ XForms are used by the ODK Collect mobile client http://eden.sahanafoundation.org/wiki/BluePrint/Mobile#Android """ names = ("msg_xforms_store",) def model(self): #T = current.T # --------------------------------------------------------------------- # SMS store for persistence and scratch pad for combining incoming xform chunks tablename = "msg_xforms_store" self.define_table(tablename, Field("sender", length=20), Field("fileno", "integer"), Field("totalno", "integer"), Field("partno", "integer"), Field("message", length=160) ) # --------------------------------------------------------------------- return {} # ============================================================================= class S3BaseStationModel(S3Model): """ Base Stations (Cell Towers) are a type of Site @ToDo: Calculate Coverage from Antenna Height, Radio Power and Terrain - see RadioMobile """ names = ("msg_basestation",) def model(self): T = current.T # --------------------------------------------------------------------- # Base Stations (Cell Towers) # if current.deployment_settings.get_msg_basestation_code_unique(): db = current.db code_unique = IS_EMPTY_OR(IS_NOT_IN_DB(db, "msg_basestation.code")) else: code_unique = None tablename = "msg_basestation" self.define_table(tablename, self.super_link("site_id", "org_site"), Field("name", notnull=True, length=64, # Mayon Compatibility label = T("Name"), ), Field("code", length=10, # Mayon compatibility label = T("Code"), requires = code_unique, ), self.org_organisation_id( label = T("Operator"), requires = self.org_organisation_requires(required=True, updateable=True), #widget=S3OrganisationAutocompleteWidget(default_from_profile=True), ), self.gis_location_id(), s3_comments(), *s3_meta_fields()) # CRUD strings current.response.s3.crud_strings[tablename] = Storage( label_create=T("Create Base Station"), title_display=T("Base Station 
Details"), title_list=T("Base Stations"), title_update=T("Edit Base Station"), title_upload=T("Import Base Stations"), title_map=T("Map of Base Stations"), label_list_button=T("List Base Stations"), label_delete_button=T("Delete Base Station"), msg_record_created=T("Base Station added"), msg_record_modified=T("Base Station updated"), msg_record_deleted=T("Base Station deleted"), msg_list_empty=T("No Base Stations currently registered")) self.configure(tablename, deduplicate = self.msg_basestation_duplicate, super_entity = "org_site", ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return {} # --------------------------------------------------------------------- @staticmethod def msg_basestation_duplicate(item): """ Import item deduplication, match by name (Adding location_id doesn't seem to be a good idea) @param item: the S3ImportItem instance """ name = item.data.get("name") table = item.table query = (table.name.lower() == name.lower()) #location_id = None # if "location_id" in item.data: # location_id = item.data.location_id ## This doesn't find deleted records: # query = query & (table.location_id == location_id) duplicate = current.db(query).select(table.id, limitby=(0, 1)).first() # if duplicate is None and location_id: ## Search for deleted basestations with this name # query = (table.name.lower() == name.lower()) & \ # (table.deleted == True) # row = db(query).select(table.id, table.deleted_fk, # limitby=(0, 1)).first() # if row: # fkeys = json.loads(row.deleted_fk) # if "location_id" in fkeys and \ # str(fkeys["location_id"]) == str(location_id): # duplicate = row if duplicate: item.id = duplicate.id item.method = item.METHOD.UPDATE # END =========================================================================
mit
pbrunet/pythran
pythran/tests/euler/euler41.py
5
2648
#runas solve() #unittest.skip recursive generator #pythran export solve() ''' From O'Reilly's Python Cookbook ''' def _combinators(_handle, items, n): if n==0: yield [] return for i, item in enumerate(items): this_one = [ item ] for cc in _combinators(_handle, _handle(items, i), n-1): yield this_one + cc def combinations(items, n): ''' take n distinct items, order matters ''' def skipIthItem(items, i): return items[:i] + items[i+1:] return _combinators(skipIthItem, items, n) def uniqueCombinations(items, n): ''' take n distinct items, order is irrelevant ''' def afterIthItem(items, i): return items[i+1:] return _combinators(afterIthItem, items, n) def selections(items, n): ''' take n (not necessarily distinct) items, order matters ''' def keepAllItems(items, i): return items return _combinators(keepAllItems, items, n) def permutations(items): ''' take all items, order matters ''' return combinations(items, len(items)) def solve(): ''' We shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital and is also prime. What is the largest n-digit pandigital prime that exists? ''' prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23] # Ensure that this is initialised with at least 1 prime prime_dict = dict.fromkeys(prime_list, 1) def _isprime(n): ''' Raw check to see if n is prime. Assumes that prime_list is already populated ''' isprime = n >= 2 and 1 or 0 for prime in prime_list: # Check for factors with all primes if prime * prime > n: break # ... up to sqrt(n) if not n % prime: isprime = 0 break if isprime: prime_dict[n] = 1 # Maintain a dictionary for fast lookup return isprime def _refresh(x): ''' Refreshes primes upto x ''' lastn = prime_list[-1] while lastn <= x: # Keep working until we've got up to x lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) # Maintain a list for sequential access # Pan-digital primes are 4 or 7 digits. 
Others divisible by 3 _refresh(2766) # sqrt(7654321) for perm in permutations(range(7, 0, -1)): num = 0 for n in perm: num = num * 10 + n if _isprime(num): return num break
bsd-3-clause
kimegitee/deep-learning
language-translation/helper.py
156
2683
import os import pickle import copy import numpy as np CODES = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2, '<GO>': 3 } def load_data(path): """ Load Dataset from File """ input_file = os.path.join(path) with open(input_file, 'r', encoding='utf-8') as f: return f.read() def preprocess_and_save_data(source_path, target_path, text_to_ids): """ Preprocess Text Data. Save to to file. """ # Preprocess source_text = load_data(source_path) target_text = load_data(target_path) source_text = source_text.lower() target_text = target_text.lower() source_vocab_to_int, source_int_to_vocab = create_lookup_tables(source_text) target_vocab_to_int, target_int_to_vocab = create_lookup_tables(target_text) source_text, target_text = text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int) # Save Data with open('preprocess.p', 'wb') as out_file: pickle.dump(( (source_text, target_text), (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab)), out_file) def load_preprocess(): """ Load the Preprocessed Training data and return them in batches of <batch_size> or less """ with open('preprocess.p', mode='rb') as in_file: return pickle.load(in_file) def create_lookup_tables(text): """ Create lookup tables for vocabulary """ vocab = set(text.split()) vocab_to_int = copy.copy(CODES) for v_i, v in enumerate(vocab, len(CODES)): vocab_to_int[v] = v_i int_to_vocab = {v_i: v for v, v_i in vocab_to_int.items()} return vocab_to_int, int_to_vocab def save_params(params): """ Save parameters to file """ with open('params.p', 'wb') as out_file: pickle.dump(params, out_file) def load_params(): """ Load parameters from file """ with open('params.p', mode='rb') as in_file: return pickle.load(in_file) def batch_data(source, target, batch_size): """ Batch source and target together """ for batch_i in range(0, len(source)//batch_size): start_i = batch_i * batch_size source_batch = source[start_i:start_i + batch_size] target_batch = target[start_i:start_i + 
batch_size] yield np.array(pad_sentence_batch(source_batch)), np.array(pad_sentence_batch(target_batch)) def pad_sentence_batch(sentence_batch): """ Pad sentence with <PAD> id """ max_sentence = max([len(sentence) for sentence in sentence_batch]) return [sentence + [CODES['<PAD>']] * (max_sentence - len(sentence)) for sentence in sentence_batch]
mit
arborh/tensorflow
tensorflow/python/keras/layers/recurrent_v2.py
3
67355
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Recurrent layers for TF 2.0. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import uuid from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.framework import constant_op from tensorflow.python.framework import device from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.layers import recurrent from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_cudnn_rnn_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import state_ops from tensorflow.python.util.tf_export import keras_export # The following string constants are used by Defun approach for unified backend # of LSTM and GRU. _DEFUN_API_NAME_ATTRIBUTE = 'api_implements' _DEFUN_DEVICE_ATTRIBUTE = 'api_preferred_device' _CPU_DEVICE_NAME = 'CPU' _GPU_DEVICE_NAME = 'GPU' # The following number constants are used to represent the runtime of the defun # backend function. 
Since the CPU/GPU implementation are mathematically same, we # need some signal for the function to indicate which function is executed. This # is for testing purpose to verify the correctness of swapping backend function. _RUNTIME_UNKNOWN = 0 _RUNTIME_CPU = 1 _RUNTIME_GPU = 2 @keras_export('keras.layers.GRUCell', v1=[]) class GRUCell(recurrent.GRUCell): """Cell class for the GRU layer. See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. This class processes one step within the whole time sequence input, whereas `tf.keras.layer.GRU` processes the whole sequence. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass None, no activation is applied (ie. "linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, (default `True`), whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. Default: `glorot_uniform`. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. Default: `orthogonal`. bias_initializer: Initializer for the bias vector. Default: `zeros`. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. Default: `None`. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_regularizer: Regularizer function applied to the bias vector. Default: `None`. kernel_constraint: Constraint function applied to the `kernel` weights matrix. Default: `None`. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. Default: `None`. 
bias_constraint: Constraint function applied to the bias vector. Default: `None`. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. Default: 0. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Default: 0. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 (default) will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. Default: 2. reset_after: GRU convention (whether to apply reset gate after or before matrix multiplication). False = "before", True = "after" (default and CuDNN compatible). Call arguments: inputs: A 2D tensor, with shape of `[batch, feature]`. states: A 2D tensor with shape of `[batch, units]`, which is the state from the previous time step. For timestep 0, the initial state provided by user will be feed to cell. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used. Examples: ```python inputs = np.random.random([32, 10, 8]).astype(np.float32) rnn = tf.keras.layers.RNN(tf.keras.layers.GRUCell(4)) output = rnn(inputs) # The output has shape `[32, 4]`. rnn = tf.keras.layers.RNN( tf.keras.layers.GRUCell(4), return_sequences=True, return_state=True) # whole_sequence_output has shape `[32, 10, 4]`. # final_state has shape `[32, 4]`. 
whole_sequence_output, final_state = rnn(inputs) ``` """ def __init__(self, units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., implementation=2, reset_after=True, **kwargs): super(GRUCell, self).__init__( units, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, implementation=implementation, reset_after=reset_after, **kwargs) @keras_export('keras.layers.GRU', v1=[]) class GRU(recurrent.DropoutRNNCellMixin, recurrent.GRU): """Gated Recurrent Unit - Cho et al. 2014. See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. Based on available runtime hardware and constraints, this layer will choose different implementations (cuDNN-based or pure-TensorFlow) to maximize the performance. If a GPU is available and all the arguments to the layer meet the requirement of the CuDNN kernel (see below for details), the layer will use a fast cuDNN implementation. The requirements to use the cuDNN implementation are: 1. `activation` == `tanh` 2. `recurrent_activation` == `sigmoid` 3. `recurrent_dropout` == 0 4. `unroll` is `False` 5. `use_bias` is `True` 6. `reset_after` is `True` 7. Inputs are not masked or strictly right padded. There are two variants of the GRU implementation. 
The default one is based on [v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to hidden state before matrix multiplication. The other one is based on [original](https://arxiv.org/abs/1406.1078v1) and has the order reversed. The second variant is compatible with CuDNNGRU (GPU-only) and allows inference on CPU. Thus it has separate biases for `kernel` and `recurrent_kernel`. To use this variant, set `'reset_after'=True` and `recurrent_activation='sigmoid'`. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, (default `True`), whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. Default: `glorot_uniform`. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. Default: `orthogonal`. bias_initializer: Initializer for the bias vector. Default: `zeros`. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. Default: `None`. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_regularizer: Regularizer function applied to the bias vector. Default: `None`. activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). Default: `None`. kernel_constraint: Constraint function applied to the `kernel` weights matrix. Default: `None`. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. Default: `None`. 
bias_constraint: Constraint function applied to the bias vector. Default: `None`. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. Default: 0. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Default: 0. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. Default: 2. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. Default: `False`. return_state: Boolean. Whether to return the last state in addition to the output. Default: `False`. go_backwards: Boolean (default `False`). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. unroll: Boolean (default False). If True, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed-up a RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences. time_major: The shape format of the `inputs` and `outputs` tensors. If True, the inputs and outputs will be in shape `[timesteps, batch, feature]`, whereas in the False case, it will be `[batch, timesteps, feature]`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. reset_after: GRU convention (whether to apply reset gate after or before matrix multiplication). 
False = "before", True = "after" (default and CuDNN compatible). Call arguments: inputs: A 3D tensor, with shape `[batch, timesteps, feature]`. mask: Binary tensor of shape `[samples, timesteps]` indicating whether a given timestep should be masked (optional, defaults to `None`). training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is only relevant if `dropout` or `recurrent_dropout` is used (optional, defaults to `None`). initial_state: List of initial state tensors to be passed to the first call of the cell (optional, defaults to `None` which causes creation of zero-filled initial state tensors). Examples: ```python inputs = np.random.random([32, 10, 8]).astype(np.float32) gru = tf.keras.layers.GRU(4) output = gru(inputs) # The output has shape `[32, 4]`. gru = tf.keras.layers.GRU(4, return_sequences=True, return_state=True) # whole_sequence_output has shape `[32, 10, 4]`. # final_state has shape `[32, 4]`. whole_sequence_output, final_state = gru(inputs) ``` """ def __init__(self, units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., implementation=2, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False, time_major=False, reset_after=True, **kwargs): # return_runtime is a flag for testing, which shows the real backend # implementation chosen by grappler in graph mode. 
self._return_runtime = kwargs.pop('return_runtime', False) super(GRU, self).__init__( units, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, implementation=implementation, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, unroll=unroll, time_major=time_major, reset_after=reset_after, **kwargs) # CuDNN uses following setting by default and not configurable. self.could_use_cudnn = ( activation == 'tanh' and recurrent_activation == 'sigmoid' and recurrent_dropout == 0 and not unroll and use_bias and reset_after and ops.executing_eagerly_outside_functions()) def build(self, input_shape): super(GRU, self).build(input_shape) if not all(isinstance(v, resource_variable_ops.ResourceVariable) for v in self.weights): # Non-resource variables, such as DistributedVariables and # AutoCastVariables, do not work properly with the implementation # selector, which is used when cuDNN is used. However, by chance, such # variables happen to work in LSTM, so this check is only needed for GRU. # TODO(b/136512020): Make non-resource variables work with the # implementation selector. self.could_use_cudnn = False def call(self, inputs, mask=None, training=None, initial_state=None): # The input should be dense, padded with zeros. If a ragged input is fed # into the layer, it is padded and the row lengths are used for masking. 
inputs, row_lengths = K.convert_inputs_if_ragged(inputs) is_ragged_input = (row_lengths is not None) self._validate_args_if_ragged(is_ragged_input, mask) # GRU does not support constants. Ignore it during process. inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None) if isinstance(mask, list): mask = mask[0] input_shape = K.int_shape(inputs) timesteps = input_shape[0] if self.time_major else input_shape[1] if not self.could_use_cudnn: kwargs = {'training': training} self._maybe_reset_cell_dropout_mask(self.cell) def step(cell_inputs, cell_states): return self.cell.call(cell_inputs, cell_states, **kwargs) last_output, outputs, states = K.rnn( step, inputs, initial_state, constants=None, go_backwards=self.go_backwards, mask=mask, unroll=self.unroll, input_length=row_lengths if row_lengths is not None else timesteps, time_major=self.time_major, zero_output_for_mask=self.zero_output_for_mask) # This is a dummy tensor for testing purpose. runtime = _runtime(_RUNTIME_UNKNOWN) else: last_output, outputs, runtime, states = self._defun_gru_call( inputs, initial_state, training, mask, row_lengths) if self.stateful: updates = [state_ops.assign(self.states[0], states[0])] self.add_update(updates) if self.return_sequences: output = K.maybe_convert_to_ragged(is_ragged_input, outputs, row_lengths) else: output = last_output if self.return_state: return [output] + list(states) elif self._return_runtime: return output, runtime else: return output def _defun_gru_call(self, inputs, initial_state, training, mask, sequence_lengths): # Use the new defun approach for backend implementation swap. # Note that different implementations need to have same function # signature, eg, the tensor parameters need to have same shape and dtypes. 
self.reset_dropout_mask() dropout_mask = self.get_dropout_mask_for_cell(inputs, training, count=3) if dropout_mask is not None: inputs = inputs * dropout_mask[0] cudnn_gru_kwargs = { 'inputs': inputs, 'init_h': initial_state[0], 'kernel': self.cell.kernel, 'recurrent_kernel': self.cell.recurrent_kernel, 'bias': self.cell.bias, 'mask': mask, 'time_major': self.time_major, 'go_backwards': self.go_backwards, 'sequence_lengths': sequence_lengths } normal_gru_kwargs = cudnn_gru_kwargs.copy() normal_gru_kwargs.update({ 'activation': self.activation, 'recurrent_activation': self.recurrent_activation }) if context.executing_eagerly(): device_type = _get_context_device_type() can_use_gpu = ( # Either user specified GPU or unspecified but GPU is available. (device_type == _GPU_DEVICE_NAME or (device_type is None and context.num_gpus() > 0)) and (mask is None or is_sequence_right_padded(mask, self.time_major))) # Under eager context, check the device placement and prefer the if can_use_gpu: last_output, outputs, new_h, runtime = cudnn_gru(**cudnn_gru_kwargs) else: last_output, outputs, new_h, runtime = standard_gru(**normal_gru_kwargs) else: last_output, outputs, new_h, runtime = gru_with_backend_selection( **normal_gru_kwargs) states = [new_h] return last_output, outputs, runtime, states def standard_gru(inputs, init_h, kernel, recurrent_kernel, bias, activation, recurrent_activation, mask, time_major, go_backwards, sequence_lengths): """GRU with standard kernel implementation. This implementation can be run on all types of hardware. This implementation lifts out all the layer weights and make them function parameters. It has same number of tensor input params as the CuDNN counterpart. The RNN step logic has been simplified, eg dropout and mask is removed since CuDNN implementation does not support that. Arguments: inputs: Input tensor of GRU layer. init_h: Initial state tensor for the cell output. kernel: Weights for cell kernel. 
recurrent_kernel: Weights for cell recurrent kernel. bias: Weights for cell kernel bias and recurrent bias. The bias contains the combined input_bias and recurrent_bias. activation: Activation function to use for output. recurrent_activation: Activation function to use for hidden recurrent state. mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. time_major: Boolean, whether the inputs are in the format of [time, batch, feature] or [batch, time, feature]. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. sequence_lengths: The lengths of all sequences coming from a variable length input, such as ragged tensors. If the input has a fixed timestep size, this should be None. Returns: last_output: output tensor for the last timestep, which has shape [batch, units]. outputs: output tensor for all timesteps, which has shape [batch, time, units]. state_0: the cell output, which has same shape as init_h. runtime: constant string tensor which indicate real runtime hardware. This value is for testing purpose and should be used by user. 
""" input_shape = K.int_shape(inputs) timesteps = input_shape[0] if time_major else input_shape[1] input_bias, recurrent_bias = array_ops.unstack(bias) def step(cell_inputs, cell_states): """Step function that will be used by Keras RNN backend.""" h_tm1 = cell_states[0] # inputs projected by all gate matrices at once matrix_x = K.dot(cell_inputs, kernel) matrix_x = K.bias_add(matrix_x, input_bias) x_z, x_r, x_h = array_ops.split(matrix_x, 3, axis=1) # hidden state projected by all gate matrices at once matrix_inner = K.dot(h_tm1, recurrent_kernel) matrix_inner = K.bias_add(matrix_inner, recurrent_bias) recurrent_z, recurrent_r, recurrent_h = array_ops.split(matrix_inner, 3, axis=1) z = recurrent_activation(x_z + recurrent_z) r = recurrent_activation(x_r + recurrent_r) hh = activation(x_h + r * recurrent_h) # previous and candidate state mixed by update gate h = z * h_tm1 + (1 - z) * hh return h, [h] last_output, outputs, new_states = K.rnn( step, inputs, [init_h], constants=None, unroll=False, time_major=time_major, mask=mask, go_backwards=go_backwards, input_length=sequence_lengths if sequence_lengths is not None else timesteps) return last_output, outputs, new_states[0], _runtime(_RUNTIME_CPU) def cudnn_gru(inputs, init_h, kernel, recurrent_kernel, bias, mask, time_major, go_backwards, sequence_lengths): """GRU with CuDNN implementation which is only available for GPU.""" if not time_major and mask is None: inputs = array_ops.transpose(inputs, perm=(1, 0, 2)) seq_axis, batch_axis = (0, 1) else: seq_axis, batch_axis = (0, 1) if time_major else (1, 0) # For init_h, cuDNN expects one more dim of num_layers before or after batch # dim for time major or batch major inputs respectively init_h = array_ops.expand_dims(init_h, axis=seq_axis) weights = array_ops.split(kernel, 3, axis=1) weights += array_ops.split(recurrent_kernel, 3, axis=1) # Note that the bias was initialized as shape (2, 3 * units), flat it into # (6 * units) bias = array_ops.split(K.flatten(bias), 6) # 
Note that the gate order for CuDNN is different from the canonical format. # canonical format is [z, r, h], whereas CuDNN is [r, z, h]. The swap need to # be done for kernel, recurrent_kernel, input_bias, recurrent_bias. # z is update gate weights. # r is reset gate weights. # h is output gate weights. weights[0], weights[1] = weights[1], weights[0] weights[3], weights[4] = weights[4], weights[3] bias[0], bias[1] = bias[1], bias[0] bias[3], bias[4] = bias[4], bias[3] params = _canonical_to_params( weights=weights, biases=bias, shape=constant_op.constant([-1]), transpose_weights=True) if mask is not None: sequence_lengths = calculate_sequence_by_mask(mask, time_major) if sequence_lengths is not None: if go_backwards: # Three reversals are required. E.g., # normal input = [1, 2, 3, 0, 0] # where 0 need to be masked # reversed_input_to_cudnn = [3, 2, 1, 0, 0] # output_from_cudnn = [6, 5, 4, 0, 0] # expected_output = [0, 0, 6, 5 ,4] inputs = array_ops.reverse_sequence_v2( inputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis) outputs, h, _, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv3( inputs, input_h=init_h, input_c=0, params=params, is_training=True, rnn_mode='gru', sequence_lengths=sequence_lengths, time_major=time_major) if go_backwards: outputs = array_ops.reverse_sequence_v2( outputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis) outputs = array_ops.reverse(outputs, axis=[seq_axis]) else: if go_backwards: # Reverse axis 0 since the input is already convert to time major. 
inputs = array_ops.reverse(inputs, axis=[0]) outputs, h, _, _ = gen_cudnn_rnn_ops.cudnn_rnn( inputs, input_h=init_h, input_c=0, params=params, is_training=True, rnn_mode='gru') last_output = outputs[-1] if not time_major and mask is None: outputs = array_ops.transpose(outputs, perm=[1, 0, 2]) h = array_ops.squeeze(h, axis=seq_axis) # In the case of variable length input, the cudnn kernel will fill zeros for # the output, whereas the default keras behavior is to bring over the previous # output for t-1, so that in the return_sequence=False case, user can quickly # get the final effect output instead just 0s at the last timestep. # In order to mimic the default keras behavior, we copy the final h state as # the last_output, since it is numerically same as the output. if mask is not None: last_output = h return last_output, outputs, h, _runtime(_RUNTIME_GPU) def gru_with_backend_selection(inputs, init_h, kernel, recurrent_kernel, bias, mask, time_major, go_backwards, activation, recurrent_activation, sequence_lengths): """Call the GRU with optimized backend kernel selection. Under the hood, this function will create two TF function, one with the most generic kernel and can run on all device condition, and the second one with CuDNN specific kernel, which can only run on GPU. The first function will be called with normal_lstm_params, while the second function is not called, but only registered in the graph. The Grappler will do the proper graph rewrite and swap the optimized TF function based on the device placement. Args: inputs: Input tensor of GRU layer. init_h: Initial state tensor for the cell output. kernel: Weights for cell kernel. recurrent_kernel: Weights for cell recurrent kernel. bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias is used in this case. mask: Boolean tensor for mask out the steps within sequence. time_major: Boolean, whether the inputs are in the format of [time, batch, feature] or [batch, time, feature]. 
go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. activation: Activation function to use for output. recurrent_activation: Activation function to use for hidden recurrent state. sequence_lengths: The lengths of all sequences coming from a variable length input, such as ragged tensors. If the input has a fixed timestep size, this should be None. Returns: List of output tensors, same as standard_gru. """ params = { 'inputs': inputs, 'init_h': init_h, 'kernel': kernel, 'recurrent_kernel': recurrent_kernel, 'bias': bias, 'mask': mask, 'time_major': time_major, 'go_backwards': go_backwards, 'activation': activation, 'recurrent_activation': recurrent_activation, 'sequence_lengths': sequence_lengths } def cudnn_gru_with_fallback(inputs, init_h, kernel, recurrent_kernel, bias, mask, time_major, go_backwards, activation, recurrent_activation, sequence_lengths): """Use CuDNN kernel when mask is none or strictly right padded.""" if mask is None: return cudnn_gru( inputs=inputs, init_h=init_h, kernel=kernel, recurrent_kernel=recurrent_kernel, bias=bias, mask=mask, time_major=time_major, go_backwards=go_backwards, sequence_lengths=sequence_lengths) def input_right_padded(): return cudnn_gru( inputs=inputs, init_h=init_h, kernel=kernel, recurrent_kernel=recurrent_kernel, bias=bias, mask=mask, time_major=time_major, go_backwards=go_backwards, sequence_lengths=sequence_lengths) def input_not_right_padded(): return standard_gru( inputs=inputs, init_h=init_h, kernel=kernel, recurrent_kernel=recurrent_kernel, bias=bias, mask=mask, time_major=time_major, go_backwards=go_backwards, activation=activation, recurrent_activation=recurrent_activation, sequence_lengths=sequence_lengths) return control_flow_ops.cond( is_sequence_right_padded(mask, time_major), true_fn=input_right_padded, false_fn=input_not_right_padded) # Each time a `tf.function` is called, we will give it a unique # identifiable API name, so that Grappler 
won't get confused when it # sees multiple GRU layers added into same graph, and it will be able # to pair up the different implementations across them. api_name = 'gru_' + str(uuid.uuid4()) defun_standard_gru = _generate_defun_backend( api_name, _CPU_DEVICE_NAME, standard_gru) defun_cudnn_gru = _generate_defun_backend( api_name, _GPU_DEVICE_NAME, cudnn_gru_with_fallback) # Call the normal GRU impl and register the CuDNN impl function. The # grappler will kick in during session execution to optimize the graph. last_output, outputs, new_h, runtime = defun_standard_gru(**params) function.register(defun_cudnn_gru, **params) return last_output, outputs, new_h, runtime @keras_export('keras.layers.LSTMCell', v1=[]) class LSTMCell(recurrent.LSTMCell): """Cell class for the LSTM layer. See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. This class processes one step within the whole time sequence input, whereas `tf.keras.layer.LSTM` processes the whole sequence. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, (default `True`), whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. Default: `glorot_uniform`. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. Default: `orthogonal`. bias_initializer: Initializer for the bias vector. Default: `zeros`. unit_forget_bias: Boolean (default `True`). 
If True, add 1 to the bias of the forget gate at initialization. Setting it to true will also force `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf) kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. Default: `None`. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_regularizer: Regularizer function applied to the bias vector. Default: `None`. kernel_constraint: Constraint function applied to the `kernel` weights matrix. Default: `None`. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_constraint: Constraint function applied to the bias vector. Default: `None`. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. Default: 0. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Default: 0. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 (default) will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. Default: 2. Call arguments: inputs: A 2D tensor, with shape of `[batch, feature]`. states: List of 2 tensors that corresponding to the cell's units. Both of them have shape `[batch, units]`, the first tensor is the memory state from previous time step, the second tesnor is the carry state from previous time step. For timestep 0, the initial state provided by user will be feed to cell. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used. 
Examples: ```python inputs = np.random.random([32, 10, 8]).astype(np.float32) rnn = tf.keras.layers.RNN(tf.keras.layers.LSTMCell(4)) output = rnn(inputs) # The output has shape `[32, 4]`. rnn = tf.keras.layers.RNN( tf.keras.layers.LSTMCell(4), return_sequences=True, return_state=True) # whole_sequence_output has shape `[32, 10, 4]`. # final_memory_state and final_carry_state both have shape `[32, 4]`. whole_sequence_output, final_memory_state, final_carry_state = rnn(inputs) ``` """ def __init__(self, units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., implementation=2, **kwargs): super(LSTMCell, self).__init__( units, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, unit_forget_bias=unit_forget_bias, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, implementation=implementation, **kwargs) @keras_export('keras.layers.LSTM', v1=[]) class LSTM(recurrent.DropoutRNNCellMixin, recurrent.LSTM): """Long Short-Term Memory layer - Hochreiter 1997. See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. Based on available runtime hardware and constraints, this layer will choose different implementations (cuDNN-based or pure-TensorFlow) to maximize the performance. 
If a GPU is available and all the arguments to the layer meet the requirement of the CuDNN kernel (see below for details), the layer will use a fast cuDNN implementation. The requirements to use the cuDNN implementation are: 1. `activation` == `tanh` 2. `recurrent_activation` == `sigmoid` 3. `recurrent_dropout` == 0 4. `unroll` is `False` 5. `use_bias` is `True` 6. Inputs are not masked or strictly right padded. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean (default `True`), whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. Default: `glorot_uniform`. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. Default: `orthogonal`. bias_initializer: Initializer for the bias vector. Default: `zeros`. unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of the forget gate at initialization. Setting it to true will also force `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. Default: `None`. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_regularizer: Regularizer function applied to the bias vector. Default: `None`. activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). Default: `None`. 
kernel_constraint: Constraint function applied to the `kernel` weights matrix. Default: `None`. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_constraint: Constraint function applied to the bias vector. Default: `None`. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. Default: 0. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Default: 0. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. Default: 2. return_sequences: Boolean. Whether to return the last output. in the output sequence, or the full sequence. Default: `False`. return_state: Boolean. Whether to return the last state in addition to the output. Default: `False`. go_backwards: Boolean (default `False`). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default `False`). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. time_major: The shape format of the `inputs` and `outputs` tensors. If True, the inputs and outputs will be in shape `[timesteps, batch, feature]`, whereas in the False case, it will be `[batch, timesteps, feature]`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. unroll: Boolean (default `False`). If True, the network will be unrolled, else a symbolic loop will be used. 
Unrolling can speed-up a RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences. Call arguments: inputs: A 3D tensor with shape `[batch, timesteps, feature]`. mask: Binary tensor of shape `[batch, timesteps]` indicating whether a given timestep should be masked (optional, defaults to `None`). training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is only relevant if `dropout` or `recurrent_dropout` is used (optional, defaults to `None`). initial_state: List of initial state tensors to be passed to the first call of the cell (optional, defaults to `None` which causes creation of zero-filled initial state tensors). Examples: ```python inputs = np.random.random([32, 10, 8]).astype(np.float32) lstm = tf.keras.layers.LSTM(4) output = lstm(inputs) # The output has shape `[32, 4]`. lstm = tf.keras.layers.LSTM(4, return_sequences=True, return_state=True) # whole_sequence_output has shape `[32, 10, 4]`. # final_memory_state and final_carry_state both have shape `[32, 4]`. whole_sequence_output, final_memory_state, final_carry_state = lstm(inputs) ``` """ def __init__(self, units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., implementation=2, return_sequences=False, return_state=False, go_backwards=False, stateful=False, time_major=False, unroll=False, **kwargs): # return_runtime is a flag for testing, which shows the real backend # implementation chosen by grappler in graph mode. 
self.return_runtime = kwargs.pop('return_runtime', False) super(LSTM, self).__init__( units, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, unit_forget_bias=unit_forget_bias, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, implementation=implementation, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, time_major=time_major, unroll=unroll, **kwargs) self.state_spec = [ InputSpec(shape=(None, dim)) for dim in (self.units, self.units) ] self.could_use_cudnn = ( activation == 'tanh' and recurrent_activation == 'sigmoid' and recurrent_dropout == 0 and not unroll and use_bias and ops.executing_eagerly_outside_functions()) def call(self, inputs, mask=None, training=None, initial_state=None): # The input should be dense, padded with zeros. If a ragged input is fed # into the layer, it is padded and the row lengths are used for masking. inputs, row_lengths = K.convert_inputs_if_ragged(inputs) is_ragged_input = (row_lengths is not None) self._validate_args_if_ragged(is_ragged_input, mask) # LSTM does not support constants. Ignore it during process. inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None) if isinstance(mask, list): mask = mask[0] input_shape = K.int_shape(inputs) timesteps = input_shape[0] if self.time_major else input_shape[1] if not self.could_use_cudnn: # Fall back to use the normal LSTM. 
kwargs = {'training': training} self._maybe_reset_cell_dropout_mask(self.cell) def step(inputs, states): return self.cell.call(inputs, states, **kwargs) last_output, outputs, states = K.rnn( step, inputs, initial_state, constants=None, go_backwards=self.go_backwards, mask=mask, unroll=self.unroll, input_length=row_lengths if row_lengths is not None else timesteps, time_major=self.time_major, zero_output_for_mask=self.zero_output_for_mask) runtime = _runtime(_RUNTIME_UNKNOWN) else: # Use the new defun approach for backend implementation swap. # Note that different implementations need to have same function # signature, eg, the tensor parameters need to have same shape and dtypes. # Since the CuDNN has an extra set of bias, those bias will be passed to # both normal and CuDNN implementations. self.reset_dropout_mask() dropout_mask = self.get_dropout_mask_for_cell(inputs, training, count=4) if dropout_mask is not None: inputs = inputs * dropout_mask[0] cudnn_lstm_kwargs = { 'inputs': inputs, 'init_h': initial_state[0], 'init_c': initial_state[1], 'kernel': self.cell.kernel, 'recurrent_kernel': self.cell.recurrent_kernel, 'bias': self.cell.bias, 'mask': mask, 'time_major': self.time_major, 'go_backwards': self.go_backwards, 'sequence_lengths': row_lengths } normal_lstm_kwargs = cudnn_lstm_kwargs.copy() normal_lstm_kwargs.update({ 'activation': self.activation, 'recurrent_activation': self.recurrent_activation }) if context.executing_eagerly(): device_type = _get_context_device_type() can_use_gpu = ( # Either user specified GPU or unspecified but GPU is available. (device_type == _GPU_DEVICE_NAME or (device_type is None and context.num_gpus() > 0)) and (mask is None or is_sequence_right_padded(mask, self.time_major))) # Under eager context, check the device placement and prefer the # GPU implementation when GPU is available. 
if can_use_gpu: last_output, outputs, new_h, new_c, runtime = cudnn_lstm( **cudnn_lstm_kwargs) else: last_output, outputs, new_h, new_c, runtime = standard_lstm( **normal_lstm_kwargs) else: (last_output, outputs, new_h, new_c, runtime) = lstm_with_backend_selection(**normal_lstm_kwargs) states = [new_h, new_c] if self.stateful: updates = [] for i in range(len(states)): updates.append(state_ops.assign(self.states[i], states[i])) self.add_update(updates) if self.return_sequences: output = K.maybe_convert_to_ragged(is_ragged_input, outputs, row_lengths) else: output = last_output if self.return_state: return [output] + list(states) elif self.return_runtime: return output, runtime else: return output def _canonical_to_params(weights, biases, shape, transpose_weights=False): """Utility function convert variable to CuDNN compatible parameter. Note that Keras weights for kernels are different from the CuDNN format. Eg.: ``` Keras CuDNN [[0, 1, 2], <---> [[0, 2, 4], [3, 4, 5]] [1, 3, 5]] ``` If the input weights need to be in a unified format, then set `transpose_weights=True` to convert the weights. Args: weights: list of weights for the individual kernels and recurrent kernels. biases: list of biases for individual gate. shape: the shape for the converted variables that will be feed to CuDNN. transpose_weights: boolean, whether to transpose the weights. Returns: The converted weights that can be feed to CuDNN ops as param. """ def convert(w): return array_ops.transpose(w) if transpose_weights else w weights = [array_ops.reshape(convert(x), shape) for x in weights] biases = [array_ops.reshape(x, shape) for x in biases] return array_ops.concat(weights + biases, axis=0) def standard_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias, activation, recurrent_activation, mask, time_major, go_backwards, sequence_lengths): """LSTM with standard kernel implementation. This implementation can be run on all types for hardware. 
This implementation lifts out all the layer weights and make them function parameters. It has same number of tensor input params as the CuDNN counterpart. The RNN step logic has been simplified, eg dropout and mask is removed since CuDNN implementation does not support that. Note that the first half of the bias tensor should be ignored by this impl. The CuDNN impl need an extra set of input gate bias. In order to make the both function take same shape of parameter, that extra set of bias is also feed here. Args: inputs: input tensor of LSTM layer. init_h: initial state tensor for the cell output. init_c: initial state tensor for the cell hidden state. kernel: weights for cell kernel. recurrent_kernel: weights for cell recurrent kernel. bias: weights for cell kernel bias and recurrent bias. Only recurrent bias is used in this case. activation: Activation function to use for output. recurrent_activation: Activation function to use for hidden recurrent state. mask: Boolean tensor for mask out the steps within sequence. time_major: boolean, whether the inputs are in the format of [time, batch, feature] or [batch, time, feature]. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. sequence_lengths: The lengths of all sequences coming from a variable length input, such as ragged tensors. If the input has a fixed timestep size, this should be None. Returns: last_output: output tensor for the last timestep, which has shape [batch, units]. outputs: output tensor for all timesteps, which has shape [batch, time, units]. state_0: the cell output, which has same shape as init_h. state_1: the cell hidden state, which has same shape as init_c. runtime: constant string tensor which indicate real runtime hardware. This value is for testing purpose and should be used by user. 
""" input_shape = K.int_shape(inputs) timesteps = input_shape[0] if time_major else input_shape[1] def step(cell_inputs, cell_states): """Step function that will be used by Keras RNN backend.""" h_tm1 = cell_states[0] # previous memory state c_tm1 = cell_states[1] # previous carry state z = K.dot(cell_inputs, kernel) z += K.dot(h_tm1, recurrent_kernel) z = K.bias_add(z, bias) z0, z1, z2, z3 = array_ops.split(z, 4, axis=1) i = recurrent_activation(z0) f = recurrent_activation(z1) c = f * c_tm1 + i * activation(z2) o = recurrent_activation(z3) h = o * activation(c) return h, [h, c] last_output, outputs, new_states = K.rnn( step, inputs, [init_h, init_c], constants=None, unroll=False, time_major=time_major, mask=mask, go_backwards=go_backwards, input_length=sequence_lengths if sequence_lengths is not None else timesteps) return (last_output, outputs, new_states[0], new_states[1], _runtime(_RUNTIME_CPU)) def cudnn_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias, mask, time_major, go_backwards, sequence_lengths): """LSTM with CuDNN implementation which is only available for GPU. Note that currently only right padded data is supported, or the result will be polluted by the unmasked data which should be filtered. Args: inputs: Input tensor of LSTM layer. init_h: Initial state tensor for the cell output. init_c: Initial state tensor for the cell hidden state. kernel: Weights for cell kernel. recurrent_kernel: Weights for cell recurrent kernel. bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias is used in this case. mask: Boolean tensor for mask out the steps within sequence. time_major: Boolean, whether the inputs are in the format of [time, batch, feature] or [batch, time, feature]. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. sequence_lengths: The lengths of all sequences coming from a variable length input, such as ragged tensors. 
If the input has a fixed timestep size, this should be None. Returns: last_output: Output tensor for the last timestep, which has shape [batch, units]. outputs: Output tensor for all timesteps, which has shape [batch, time, units]. state_0: The cell output, which has same shape as init_h. state_1: The cell hidden state, which has same shape as init_c. runtime: Constant string tensor which indicate real runtime hardware. This value is for testing purpose and should not be used by user. """ if not time_major and mask is None: inputs = array_ops.transpose(inputs, perm=(1, 0, 2)) seq_axis, batch_axis = (0, 1) else: seq_axis, batch_axis = (0, 1) if time_major else (1, 0) # For init_h and init_c, cuDNN expects one more dim of num_layers before or # after batch dim for time major or batch major inputs respectively init_h = array_ops.expand_dims(init_h, axis=seq_axis) init_c = array_ops.expand_dims(init_c, axis=seq_axis) weights = array_ops.split(kernel, 4, axis=1) weights += array_ops.split(recurrent_kernel, 4, axis=1) # CuDNN has an extra set of bias for inputs, we disable them (setting to 0), # so that mathematically it is same as the canonical LSTM implementation. full_bias = array_ops.concat((array_ops.zeros_like(bias), bias), 0) params = _canonical_to_params( weights=weights, biases=array_ops.split(full_bias, 8), shape=constant_op.constant([-1]), transpose_weights=True) if mask is not None: sequence_lengths = calculate_sequence_by_mask(mask, time_major) if sequence_lengths is not None: if go_backwards: # Three reversals are required. 
E.g., # normal input = [1, 2, 3, 0, 0] # where 0 need to be masked # reversed_input_to_cudnn = [3, 2, 1, 0, 0] # output_from_cudnn = [6, 5, 4, 0, 0] # expected_output = [0, 0, 6, 5 ,4] inputs = array_ops.reverse_sequence_v2( inputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis) outputs, h, c, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv3( inputs, input_h=init_h, input_c=init_c, params=params, is_training=True, rnn_mode='lstm', sequence_lengths=sequence_lengths, time_major=time_major) if go_backwards: outputs = array_ops.reverse_sequence_v2( outputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis) outputs = array_ops.reverse(outputs, axis=[seq_axis]) else: # # Fill the array with shape [batch] with value of max timesteps. # sequence_length = array_ops.fill([array_ops.shape(inputs)[1]], # array_ops.shape(inputs)[0]) if go_backwards: # Reverse axis 0 since the input is already convert to time major. inputs = array_ops.reverse(inputs, axis=[0]) outputs, h, c, _ = gen_cudnn_rnn_ops.cudnn_rnn( inputs, input_h=init_h, input_c=init_c, params=params, is_training=True, rnn_mode='lstm') last_output = outputs[-1] if not time_major and mask is None: outputs = array_ops.transpose(outputs, perm=[1, 0, 2]) h = array_ops.squeeze(h, axis=seq_axis) c = array_ops.squeeze(c, axis=seq_axis) # In the case of variable length input, the cudnn kernel will fill zeros for # the output, whereas the default keras behavior is to bring over the previous # output for t-1, so that in the return_sequence=False case, user can quickly # get the final effect output instead just 0s at the last timestep. # In order to mimic the default keras behavior, we copy the final h state as # the last_output, since it is numerically same as the output. 
if mask is not None: last_output = h return last_output, outputs, h, c, _runtime(_RUNTIME_GPU) def lstm_with_backend_selection(inputs, init_h, init_c, kernel, recurrent_kernel, bias, mask, time_major, go_backwards, activation, recurrent_activation, sequence_lengths): """Call the LSTM with optimized backend kernel selection. Under the hood, this function will create two TF function, one with the most generic kernel and can run on all device condition, and the second one with CuDNN specific kernel, which can only run on GPU. The first function will be called with normal_lstm_params, while the second function is not called, but only registered in the graph. The Grappler will do the proper graph rewrite and swap the optimized TF function based on the device placement. Args: inputs: Input tensor of LSTM layer. init_h: Initial state tensor for the cell output. init_c: Initial state tensor for the cell hidden state. kernel: Weights for cell kernel. recurrent_kernel: Weights for cell recurrent kernel. bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias is used in this case. mask: Boolean tensor for mask out the steps within sequence. time_major: Boolean, whether the inputs are in the format of [time, batch, feature] or [batch, time, feature]. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. activation: Activation function to use for output. recurrent_activation: Activation function to use for hidden recurrent state. sequence_lengths: The lengths of all sequences coming from a variable length input, such as ragged tensors. If the input has a fixed timestep size, this should be None. Returns: List of output tensors, same as standard_lstm. 
""" params = { 'inputs': inputs, 'init_h': init_h, 'init_c': init_c, 'kernel': kernel, 'recurrent_kernel': recurrent_kernel, 'bias': bias, 'mask': mask, 'time_major': time_major, 'go_backwards': go_backwards, 'activation': activation, 'recurrent_activation': recurrent_activation, 'sequence_lengths': sequence_lengths } def cudnn_lstm_with_fallback(inputs, init_h, init_c, kernel, recurrent_kernel, bias, mask, time_major, go_backwards, activation, recurrent_activation, sequence_lengths): """Use CuDNN kernel when mask is none or strictly right padded.""" if mask is None: return cudnn_lstm( inputs=inputs, init_h=init_h, init_c=init_c, kernel=kernel, recurrent_kernel=recurrent_kernel, bias=bias, mask=mask, time_major=time_major, go_backwards=go_backwards, sequence_lengths=sequence_lengths) def input_right_padded(): return cudnn_lstm( inputs=inputs, init_h=init_h, init_c=init_c, kernel=kernel, recurrent_kernel=recurrent_kernel, bias=bias, mask=mask, time_major=time_major, go_backwards=go_backwards, sequence_lengths=sequence_lengths) def input_not_right_padded(): return standard_lstm( inputs=inputs, init_h=init_h, init_c=init_c, kernel=kernel, recurrent_kernel=recurrent_kernel, bias=bias, mask=mask, time_major=time_major, go_backwards=go_backwards, activation=activation, recurrent_activation=recurrent_activation, sequence_lengths=sequence_lengths) return control_flow_ops.cond( is_sequence_right_padded(mask, time_major), true_fn=input_right_padded, false_fn=input_not_right_padded) # Each time a `tf.function` is called, we will give it a unique # identifiable API name, so that Grappler won't get confused when it # sees multiple LSTM layers added into same graph, and it will be able # to pair up the different implementations across them. 
api_name = 'lstm_' + str(uuid.uuid4()) defun_standard_lstm = _generate_defun_backend( api_name, _CPU_DEVICE_NAME, standard_lstm) defun_cudnn_lstm = _generate_defun_backend( api_name, _GPU_DEVICE_NAME, cudnn_lstm_with_fallback) # Call the normal LSTM impl and register the CuDNN impl function. The # grappler will kick in during session execution to optimize the graph. last_output, outputs, new_h, new_c, runtime = defun_standard_lstm( **params) function.register(defun_cudnn_lstm, **params) return last_output, outputs, new_h, new_c, runtime def is_sequence_right_padded(mask, time_major): """Check the mask tensor and see if it right padded. For CuDNN kernel, it uses the sequence length param to skip the tailing timestep. If the data is left padded, or not a strict right padding (has masked value in the middle of the sequence), then CuDNN kernel won't be work properly in those cases. Left padded data: [[False, False, True, True, True]]. Right padded data: [[True, True, True, False, False]]. Mixture of mask/unmasked data: [[True, False, True, False, False]]. Note that for the mixed data example above, the actually data RNN should see are those 2 Trues (index 0 and 2), the index 1 False should be ignored and not pollute the internal states. Args: mask: the Boolean tensor with shape [batch, timestep] or [timestep, batch] when time_major is True. time_major: Boolean, whether the input mask is time major or batch major. Returns: boolean scalar tensor, whether the mask is strictly right padded. """ if time_major: mask = array_ops.transpose(mask) max_seq_length = array_ops.shape(mask)[1] count_of_true = math_ops.reduce_sum(math_ops.cast(mask, dtypes.int32), axis=1) right_padded_mask = array_ops.sequence_mask( count_of_true, maxlen=max_seq_length) return math_ops.reduce_all(math_ops.equal(mask, right_padded_mask)) def calculate_sequence_by_mask(mask, time_major): """Calculate the sequence length tensor (1-D) based on the masking tensor. 
The masking tensor is a 2D boolean tensor with shape [batch, timestep]. For any timestep that should be masked, the corresponding field will be False. Consider the following example: a = [[True, True, False, False], [True, True, True, False]] It is a (2, 4) tensor, and the corresponding sequence length result should be 1D tensor with value [2, 3]. Note that the masking tensor must be right padded that could be checked by, e.g., `is_sequence_right_padded()`. Args: mask: Boolean tensor with shape [batch, timestep] or [timestep, batch] if time_major=True. time_major: Boolean, which indicates whether the mask is time major or batch major. Returns: sequence_length: 1D int32 tensor. """ timestep_index = 0 if time_major else 1 return math_ops.reduce_sum(math_ops.cast(mask, dtypes.int32), axis=timestep_index) def _generate_defun_backend(unique_api_name, preferred_device, func): function_attributes = { _DEFUN_API_NAME_ATTRIBUTE: unique_api_name, _DEFUN_DEVICE_ATTRIBUTE: preferred_device, } return function.defun_with_attributes(func=func, attributes=function_attributes, autograph=False) def _get_context_device_type(): """Parse the current context and return the device type, eg CPU/GPU.""" current_device = context.context().device_name if current_device is None: return None return device.DeviceSpec.from_string(current_device).device_type def _runtime(runtime_name): with ops.device('/cpu:0'): return constant_op.constant( runtime_name, dtype=dtypes.float32, name='runtime')
apache-2.0
jyemin/mongo-orchestration
mongo_orchestration/server.py
2
3856
#!/usr/bin/python
# coding=utf-8
"""Command-line entry point for the mongo-orchestration HTTP server.

Parses CLI arguments (optionally merged with a JSON config file of MongoDB
releases), then starts/stops/restarts a daemonized Bottle application that
serves the servers/replica_sets/sharded_clusters sub-apps.
"""
import argparse
import atexit
import json
import logging
import os
import sys

from bson import SON

from mongo_orchestration.daemon import Daemon

# All runtime artifacts (pid file, log file) live in MONGO_ORCHESTRATION_HOME,
# falling back to the current working directory.
work_dir = os.environ.get('MONGO_ORCHESTRATION_HOME', os.getcwd())
pid_file = os.path.join(work_dir, 'server.pid')
log_file = os.path.join(work_dir, 'server.log')

DEFAULT_PORT = 8889

# Configure logging after log_file is known; 'w' truncates on every start.
logging.basicConfig(level=logging.DEBUG, filename=log_file, filemode='w')


def read_env():
    """Parse command-line arguments and, when given, the JSON config file.

    Returns the argparse namespace.  When a config file is supplied (and the
    command is not 'stop'), a ``releases`` attribute is added to the
    namespace.  Exits with status 1 on any validation or parse failure.
    """
    parser = argparse.ArgumentParser(description='mongo-orchestration server')
    parser.add_argument('-f', '--config',
                        action='store', default=None, type=str, dest='config')
    parser.add_argument('-e', '--env',
                        action='store', type=str, dest='env', default=None)
    parser.add_argument(action='store', type=str, dest='command',
                        default='start', choices=('start', 'stop', 'restart'))
    parser.add_argument('--no-fork',
                        action='store_true', dest='no_fork', default=False)
    parser.add_argument('-p', '--port',
                        action='store', dest='port', type=int,
                        default=DEFAULT_PORT)

    cli_args = parser.parse_args()

    # An --env selection only makes sense relative to a config file.
    if cli_args.env and not cli_args.config:
        print("Specified release '%s' without a config file" % cli_args.env)
        sys.exit(1)
    # 'stop' needs no release information; likewise when no config was given.
    if cli_args.command == 'stop' or not cli_args.config:
        return cli_args
    try:
        # Read config; SON preserves key order of the JSON document.
        with open(cli_args.config, 'r') as fd:
            config = json.load(fd, object_hook=SON)
        if 'releases' not in config:
            print("No releases defined in %s" % cli_args.config)
            sys.exit(1)
        releases = config['releases']
        if cli_args.env is not None and cli_args.env not in releases:
            print("Release '%s' is not defined in %s"
                  % (cli_args.env, cli_args.config))
            sys.exit(1)
        cli_args.releases = releases
        return cli_args
    except IOError:
        print("config file not found")
        sys.exit(1)
    except ValueError:
        print("config file is corrupted")
        sys.exit(1)


def setup(releases, default_release):
    """Initialize release storage and register cleanup at interpreter exit."""
    # Imported lazily so merely importing this module has no such side effect.
    from mongo_orchestration import set_releases, cleanup_storage
    set_releases(releases, default_release)
    atexit.register(cleanup_storage)


def get_app():
    """Return the Bottle application combining all sub-apps.

    Each apps module registers its routes on the default app at import time;
    push/pop isolates those registrations onto a fresh app instance.
    """
    from bottle import default_app
    default_app.push()
    for module in ("mongo_orchestration.apps.servers",
                   "mongo_orchestration.apps.replica_sets",
                   "mongo_orchestration.apps.sharded_clusters"):
        __import__(module)
    return default_app.pop()


class MyDaemon(Daemon):
    """Daemon wrapper that runs the Bottle server as its payload."""

    def run(self):
        """Daemon body: set up storages and serve HTTP until terminated."""
        from bottle import run
        # 'releases' is only present when a config file was parsed.
        setup(getattr(self.args, 'releases', {}), self.args.env)
        if self.args.command in ('start', 'restart'):
            print("Starting Mongo Orchestration on port %d..."
                  % self.args.port)
            # quiet unless running in the foreground (--no-fork).
            run(get_app(), host='localhost', port=self.args.port,
                debug=False, reloader=False,
                quiet=not self.args.no_fork, server='cherrypy')

    def set_args(self, args):
        """Attach the parsed CLI namespace for use inside run()."""
        self.args = args


def main():
    """Dispatch the requested daemon command (start/stop/restart)."""
    daemon = MyDaemon(pid_file, timeout=5, stdout=sys.stdout)
    args = read_env()
    daemon.set_args(args)
    if args.command == 'stop':
        daemon.stop()
    elif args.command == 'restart':
        daemon.restart()
    elif args.command == 'start':
        # --no-fork runs in the foreground instead of daemonizing.
        if args.no_fork:
            daemon.run()
        else:
            daemon.start()


if __name__ == "__main__":
    main()
apache-2.0
bkonkle/django-baseviews
docs/conf.py
1
7253
# -*- coding: utf-8 -*-
#
# Sphinx configuration for the django-baseviews documentation.
# (Originally generated by sphinx-quickstart on Mon Sep 20 23:11:20 2010;
# this file is execfile()d with its containing directory as the cwd.)

import sys, os

# Make the package importable so we can pull version numbers from it.
DOCS_BASE = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(os.path.join(DOCS_BASE, '..')))

import baseviews

# -- General configuration -----------------------------------------------

# Sphinx extensions in use: autodoc for API docs, viewcode for source links.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Paths (relative to this directory) searched for templates.
templates_path = ['_templates']

# Source file suffix and the root document of the toctree.
source_suffix = '.rst'
master_doc = 'index'

# Project metadata.
project = u'django-baseviews'
copyright = u'2010, Brandon Konkle'

# Short X.Y version and full release string, both taken from the package
# itself so the docs never drift from the code.
version = baseviews.get_version(short=True)
release = baseviews.__version__

# Patterns (relative to the source dir) ignored when looking for sources.
exclude_patterns = ['_build']

# Pygments style for syntax highlighting.
pygments_style = 'sphinx'

# -- Options for HTML output ---------------------------------------------

# Built-in theme to use for HTML and HTML Help pages.
html_theme = 'default'

# Custom static files (e.g. style sheets); copied after the builtin static
# files, so "default.css" here overrides the builtin "default.css".
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'django-baseviewsdoc'

# -- Options for LaTeX output --------------------------------------------

# One tuple per LaTeX document:
# (source start file, target name, title, author, documentclass).
latex_documents = [
  ('index', 'django-baseviews.tex', u'django-baseviews Documentation',
   u'Brandon Konkle', 'manual'),
]

# -- Options for manual page output --------------------------------------

# One tuple per man page:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django-baseviews', u'django-baseviews Documentation',
     [u'Brandon Konkle'], 1)
]
bsd-3-clause
mckinseyacademy/xblock-diagnosticfeedback
diagnostic_feedback/settings.py
1
2183
""" Django settings for the diagnostic_feedback project. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) from __future__ import absolute_import import os import yaml BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # This is just a container for running tests, it's okay to allow it to be # defaulted here if not present in environment settings SECRET_KEY = os.environ.get('SECRET_KEY', 'xydut433=!s!i(n9u&1oiyv!hu1k=(h-)nuu30d(gd(ew%7+1w') # SECURITY WARNING: don't run with debug turned on in production! # This is just a container for running tests DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'statici18n', 'diagnostic_feedback', 'django.contrib.auth', 'django.contrib.contenttypes', ) # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ STATIC_URL = '/static/' LOCALE_PATHS = [ os.path.join(BASE_DIR, 'diagnostic_feedback/translations'), ] # statici18n # http://django-statici18n.readthedocs.io/en/latest/settings.html with open(os.path.join(BASE_DIR, 'diagnostic_feedback/translations/config.yaml'), 'r') as locale_config_file: LOCALE_CONFIG = yaml.load(locale_config_file) LANGUAGES = [ (code, code,) for code in LOCALE_CONFIG['locales'] + LOCALE_CONFIG['dummy_locales'] ] STATICI18N_DOMAIN = 'textjs' STATICI18N_NAMESPACE = 'DiagnosticFeedbackXBlockI18N' STATICI18N_PACKAGES = ( 
'diagnostic_feedback', ) STATICI18N_ROOT = 'diagnostic_feedback/public/js' STATICI18N_OUTPUT_DIR = 'translations'
agpl-3.0
Jonekee/chromium.src
third_party/typ/typ/tests/host_test.py
35
6924
# Copyright 2014 Dirk Pranke. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import pickle
import sys
import unittest

from typ.host import Host


class TestHost(unittest.TestCase):
    """Exercises the real Host object against the actual OS."""

    def host(self):
        # Factory hook so subclasses can substitute a different host.
        return Host()

    def test_capture_output(self):
        try:
            logging.basicConfig()
            h = self.host()
            h.capture_output()
            h.print_('on stdout')
            h.print_('on stderr', stream=h.stderr)
            logging.critical('critical log failure')
            out, err = h.restore_output()
            self.assertEqual(out, 'on stdout\n')
            self.assertEqual(err, 'on stderr\ncritical log failure\n')
        finally:
            # basicConfig() installed a handler; drop it so later tests
            # don't see duplicated log output.
            h.logger.handlers = []

        # TODO: Add tests for divert=False or eliminate the flag?

    def test_abspath_and_realpath(self):
        h = self.host()
        self.assertNotEqual(h.abspath(h.getcwd()), None)
        self.assertNotEqual(h.realpath(h.getcwd()), None)

    def test_chdir(self):
        h = self.host()
        orig_cwd = h.getcwd()
        h.chdir('.')
        self.assertEqual(orig_cwd, h.getcwd())
        h.chdir('..')
        self.assertNotEqual(orig_cwd, h.getcwd())

    def test_files(self):
        h = self.host()
        orig_cwd = h.getcwd()
        try:
            now = h.time()

            # TODO: MacOS does goofy things with temp dirs by default, so
            # we can't compare for equality. Figure out how to get the
            # normpath from mkdtemp
            dirpath = h.mkdtemp(suffix='host_test')
            self.assertTrue(h.isdir(dirpath))

            h.chdir(dirpath)
            self.assertIn(dirpath, h.getcwd())

            h.maybe_mkdir('bar')
            self.assertTrue(h.exists(dirpath, 'bar'))
            self.assertTrue(h.isdir(dirpath, 'bar'))
            self.assertFalse(h.isfile(dirpath, 'bar'))

            bar_path = h.join(dirpath, 'bar')
            self.assertEqual(dirpath, h.dirname(bar_path))

            h.write_text_file('bar/foo.txt', 'foo')
            self.assertTrue(h.exists('bar', 'foo.txt'))
            self.assertEqual(h.read_text_file('bar/foo.txt'), 'foo')
            self.assertTrue(h.exists(dirpath, 'bar', 'foo.txt'))
            self.assertTrue(h.isfile(dirpath, 'bar', 'foo.txt'))
            self.assertFalse(h.isdir(dirpath, 'bar', 'foo.txt'))

            h.write_binary_file('binfile', b'bin contents')
            self.assertEqual(h.read_binary_file('binfile'),
                             b'bin contents')

            self.assertEqual(sorted(h.files_under(dirpath)),
                             ['bar' + h.sep + 'foo.txt', 'binfile'])

            mtime = h.mtime(dirpath, 'bar', 'foo.txt')
            # Allow for coarse filesystem timestamp resolution.
            self.assertGreaterEqual(now, mtime - 0.1)

            h.remove(dirpath, 'bar', 'foo.txt')
            self.assertFalse(h.exists(dirpath, 'bar', 'foo.txt'))
            self.assertFalse(h.isfile(dirpath, 'bar', 'foo.txt'))

            h.chdir(orig_cwd)
            h.rmtree(dirpath)
            self.assertFalse(h.exists(dirpath))
            self.assertFalse(h.isdir(dirpath))
        finally:
            h.chdir(orig_cwd)

    def test_terminal_width(self):
        h = self.host()
        self.assertGreaterEqual(h.terminal_width(), 0)

    def test_for_mp_and_pickling(self):
        h = self.host()
        mp_host = h.for_mp()
        s = pickle.dumps(mp_host)
        pickle.loads(s)

    def test_cpu_count(self):
        h = self.host()
        self.assertGreaterEqual(h.cpu_count(), 1)

    def test_getenv(self):
        h = self.host()
        self.assertNotEqual(h.getenv('PATH', ''), None)

    def test_getpid(self):
        h = self.host()
        self.assertNotEqual(h.getpid(), 0)

    def test_basename(self):
        h = self.host()
        self.assertEqual(h.basename('foo.txt'), 'foo.txt')
        self.assertEqual(h.basename('foo/bar.txt'), 'bar.txt')

    def test_mktempfile(self):
        # Fix: the original signature carried an unused `delete=False`
        # parameter that unittest never supplies and the body never read.
        h = self.host()
        f = h.mktempfile()
        f.close()
        self.assertIsNotNone(f.name)

    def test_splitext(self):
        h = self.host()
        self.assertEqual(h.splitext('foo'), ('foo', ''))
        self.assertEqual(h.splitext('foo.txt'), ('foo', '.txt'))
        self.assertEqual(h.splitext('foo/bar'), ('foo/bar', ''))
        self.assertEqual(h.splitext('foo/bar.txt'), ('foo/bar', '.txt'))

    def test_print(self):
        h = self.host()

        class FakeStream(object):
            def __init__(self):
                self.contents = None
                self.flush_called = False

            def write(self, m):
                self.contents = m

            def flush(self):
                self.flush_called = True

        s = FakeStream()
        h.print_('hello', stream=s)
        self.assertEqual(s.contents, 'hello\n')
        self.assertTrue(s.flush_called)

        s = FakeStream()
        h.stdout = s
        h.print_('hello')
        self.assertEqual(s.contents, 'hello\n')

        s = FakeStream()
        h.stdout = s
        h.print_('hello', '')
        self.assertEqual(s.contents, 'hello')

    def test_call(self):
        h = self.host()
        ret, out, err = h.call(
            [h.python_interpreter, '-c',
             'import sys; sys.stdout.write(sys.stdin.read())'],
            stdin='foo', env={})
        self.assertEqual(ret, 0)
        self.assertEqual(out, 'foo')
        self.assertEqual(err, '')

        ret, out, err = h.call(
            [h.python_interpreter, '-c',
             'import sys; sys.stderr.write("err\\n")'])
        self.assertEqual(ret, 0)
        self.assertEqual(out, '')
        # Windows may add a carriage return.
        self.assertIn(err, ('err\n', 'err\r\n'))

    def test_call_inline(self):
        h = self.host()
        h.stdout = None
        h.stderr = None
        ret = h.call_inline([h.python_interpreter, '-c',
                             'import sys; sys.exit(0)'])
        self.assertEqual(ret, 0)

    def test_add_to_path(self):
        orig_sys_path = sys.path[:]
        try:
            h = self.host()
            # Adding an already-present entry must be a no-op.
            h.add_to_path(sys.path[-1])
            self.assertEqual(sys.path, orig_sys_path)

            dirpath = h.mkdtemp()
            h.add_to_path(dirpath)
            self.assertNotEqual(sys.path, orig_sys_path)
        finally:
            sys.path = orig_sys_path

    def test_platform(self):
        h = self.host()
        self.assertNotEqual(h.platform, None)
bsd-3-clause
jsalva/djember
djember/djember/wsgi.py
1
1562
""" WSGI config for djember project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os from os.path import abspath, dirname from sys import path SITE_ROOT = dirname(dirname(abspath(__file__))) path.append(SITE_ROOT) # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "jajaja.settings" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djember.settings.production") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
mit
hsaputra/tensorflow
tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py
8
31357
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementations of different data feeders to provide data for TF trainer.""" # TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues. from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import math import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.platform import tf_logging as logging # pylint: disable=g-multiple-import,g-bad-import-order from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels # pylint: enable=g-multiple-import,g-bad-import-order def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None): """Returns shape for input and output of the data feeder.""" x_is_dict, y_is_dict = isinstance( x_shape, dict), y_shape is not None and isinstance(y_shape, dict) if y_is_dict and n_classes is not None: assert isinstance(n_classes, dict) if batch_size is None: batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0] elif batch_size <= 0: raise 
ValueError('Invalid batch_size %d.' % batch_size) if x_is_dict: input_shape = {} for k, v in list(x_shape.items()): input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1]) else: x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1] input_shape = [batch_size] + x_shape if y_shape is None: return input_shape, None, batch_size def out_el_shape(out_shape, num_classes): out_shape = list(out_shape[1:]) if len(out_shape) > 1 else [] # Skip first dimension if it is 1. if out_shape and out_shape[0] == 1: out_shape = out_shape[1:] if num_classes is not None and num_classes > 1: return [batch_size] + out_shape + [num_classes] else: return [batch_size] + out_shape if not y_is_dict: output_shape = out_el_shape(y_shape, n_classes) else: output_shape = dict([ (k, out_el_shape(v, n_classes[k] if n_classes is not None and k in n_classes else None)) for k, v in list(y_shape.items()) ]) return input_shape, output_shape, batch_size def _data_type_filter(x, y): """Filter data types into acceptable format.""" if HAS_DASK: x = extract_dask_data(x) if y is not None: y = extract_dask_labels(y) if HAS_PANDAS: x = extract_pandas_data(x) if y is not None: y = extract_pandas_labels(y) return x, y def _is_iterable(x): return hasattr(x, 'next') or hasattr(x, '__next__') def setup_train_data_feeder(x, y, n_classes, batch_size=None, shuffle=True, epochs=None): """Create data feeder, to sample inputs from dataset. If `x` and `y` are iterators, use `StreamingDataFeeder`. Args: x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also supports iterables. y: numpy, pandas or Dask array or dictionary of aforementioned. Also supports iterables. n_classes: number of classes. Must be None or same type as y. In case, `y` is `dict` (or iterable which returns dict) such that `n_classes[key] = n_classes for y[key]` batch_size: size to split data into parts. Must be >= 1. shuffle: Whether to shuffle the inputs. epochs: Number of epochs to run. 
Returns: DataFeeder object that returns training data. Raises: ValueError: if one of `x` and `y` is iterable and the other is not. """ x, y = _data_type_filter(x, y) if HAS_DASK: # pylint: disable=g-import-not-at-top import dask.dataframe as dd if (isinstance(x, (dd.Series, dd.DataFrame)) and (y is None or isinstance(y, (dd.Series, dd.DataFrame)))): data_feeder_cls = DaskDataFeeder else: data_feeder_cls = DataFeeder else: data_feeder_cls = DataFeeder if _is_iterable(x): if y is not None and not _is_iterable(y): raise ValueError('Both x and y should be iterators for ' 'streaming learning to work.') return StreamingDataFeeder(x, y, n_classes, batch_size) return data_feeder_cls( x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs) def _batch_data(x, batch_size=None): if (batch_size is not None) and (batch_size <= 0): raise ValueError('Invalid batch_size %d.' % batch_size) x_first_el = six.next(x) x = itertools.chain([x_first_el], x) chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance( x_first_el, dict) else [] chunk_filled = False for data in x: if isinstance(data, dict): for k, v in list(data.items()): chunk[k].append(v) if (batch_size is not None) and (len(chunk[k]) >= batch_size): chunk[k] = np.matrix(chunk[k]) chunk_filled = True if chunk_filled: yield chunk chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance( x_first_el, dict) else [] chunk_filled = False else: chunk.append(data) if (batch_size is not None) and (len(chunk) >= batch_size): yield np.matrix(chunk) chunk = [] if isinstance(x_first_el, dict): for k, v in list(data.items()): chunk[k] = np.matrix(chunk[k]) yield chunk else: yield np.matrix(chunk) def setup_predict_data_feeder(x, batch_size=None): """Returns an iterable for feeding into predict step. Args: x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports iterable. batch_size: Size of batches to split data into. If `None`, returns one batch of full size. 
Returns: List or iterator (or dictionary thereof) of parts of data to predict on. Raises: ValueError: if `batch_size` <= 0. """ if HAS_DASK: x = extract_dask_data(x) if HAS_PANDAS: x = extract_pandas_data(x) if _is_iterable(x): return _batch_data(x, batch_size) if len(x.shape) == 1: x = np.reshape(x, (-1, 1)) if batch_size is not None: if batch_size <= 0: raise ValueError('Invalid batch_size %d.' % batch_size) n_batches = int(math.ceil(float(len(x)) / batch_size)) return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)] return [x] def setup_processor_data_feeder(x): """Sets up processor iterable. Args: x: numpy, pandas or iterable. Returns: Iterable of data to process. """ if HAS_PANDAS: x = extract_pandas_matrix(x) return x def check_array(array, dtype): """Checks array on dtype and converts it if different. Args: array: Input array. dtype: Expected dtype. Returns: Original array or converted. """ # skip check if array is instance of other classes, e.g. h5py.Dataset # to avoid copying array and loading whole data into memory if isinstance(array, (np.ndarray, list)): array = np.array(array, dtype=dtype, order=None, copy=False) return array def _access(data, iloc): """Accesses an element from collection, using integer location based indexing. Args: data: array-like. The collection to access iloc: `int` or `list` of `int`s. Location(s) to access in `collection` Returns: The element of `a` found at location(s) `iloc`. 
""" if HAS_PANDAS: import pandas as pd # pylint: disable=g-import-not-at-top if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame): return data.iloc[iloc] return data[iloc] def _check_dtype(dtype): if dtypes.as_dtype(dtype) == dtypes.float64: logging.warn( 'float64 is not supported by many models, consider casting to float32.') return dtype class DataFeeder(object): """Data feeder is an example class to sample data for TF trainer.""" def __init__(self, x, y, n_classes, batch_size=None, shuffle=True, random_state=None, epochs=None): """Initializes a DataFeeder instance. Args: x: One feature sample which can either Nd numpy matrix of shape `[n_samples, n_features, ...]` or dictionary of Nd numpy matrix. y: label vector, either floats for regression or class id for classification. If matrix, will consider as a sequence of labels. Can be `None` for unsupervised setting. Also supports dictionary of labels. n_classes: Number of classes, 0 and 1 are considered regression, `None` will pass through the input labels without one-hot conversion. Also, if `y` is `dict`, then `n_classes` must be `dict` such that `n_classes[key] = n_classes for label y[key]`, `None` otherwise. batch_size: Mini-batch size to accumulate samples in one mini batch. shuffle: Whether to shuffle `x`. random_state: Numpy `RandomState` object to reproduce sampling. epochs: Number of times to iterate over input data before raising `StopIteration` exception. Attributes: x: Input features (ndarray or dictionary of ndarrays). y: Input label (ndarray or dictionary of ndarrays). n_classes: Number of classes (if `None`, pass through indices without one-hot conversion). batch_size: Mini-batch size to accumulate. input_shape: Shape of the input (or dictionary of shapes). output_shape: Shape of the output (or dictionary of shapes). input_dtype: DType of input (or dictionary of shapes). output_dtype: DType of output (or dictionary of shapes. 
""" x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance( y, dict) if isinstance(y, list): y = np.array(y) self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items()) ]) if x_is_dict else check_array(x, x.dtype) self._y = None if y is None else ( dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if y_is_dict else check_array(y, y.dtype)) # self.n_classes is not None means we're converting raw target indices # to one-hot. if n_classes is not None: if not y_is_dict: y_dtype = (np.int64 if n_classes is not None and n_classes > 1 else np.float32) self._y = (None if y is None else check_array(y, dtype=y_dtype)) self.n_classes = n_classes self.max_epochs = epochs x_shape = dict([(k, v.shape) for k, v in list(self._x.items()) ]) if x_is_dict else self._x.shape y_shape = dict([(k, v.shape) for k, v in list(self._y.items()) ]) if y_is_dict else None if y is None else self._y.shape self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape( x_shape, y_shape, n_classes, batch_size) # Input dtype matches dtype of x. 
self._input_dtype = ( dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict else _check_dtype(self._x.dtype)) # self._output_dtype == np.float32 when y is None self._output_dtype = ( dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict else ( _check_dtype(self._y.dtype) if y is not None else np.float32)) # self.n_classes is None means we're passing in raw target indices if n_classes is not None and y_is_dict: for key in list(n_classes.keys()): if key in self._output_dtype: self._output_dtype[key] = np.float32 self._shuffle = shuffle self.random_state = np.random.RandomState( 42) if random_state is None else random_state if x_is_dict: num_samples = list(self._x.values())[0].shape[0] elif tensor_util.is_tensor(self._x): num_samples = self._x.shape[ 0].value # shape will be a Dimension, extract an int else: num_samples = self._x.shape[0] if self._shuffle: self.indices = self.random_state.permutation(num_samples) else: self.indices = np.array(range(num_samples)) self.offset = 0 self.epoch = 0 self._epoch_placeholder = None @property def x(self): return self._x @property def y(self): return self._y @property def shuffle(self): return self._shuffle @property def input_dtype(self): return self._input_dtype @property def output_dtype(self): return self._output_dtype @property def batch_size(self): return self._batch_size def make_epoch_variable(self): """Adds a placeholder variable for the epoch to the graph. Returns: The epoch placeholder. """ self._epoch_placeholder = array_ops.placeholder( dtypes.int32, [1], name='epoch') return self._epoch_placeholder def input_builder(self): """Builds inputs in the graph. Returns: Two placeholders for inputs and outputs. 
""" def get_placeholder(shape, dtype, name_prepend): if shape is None: return None if isinstance(shape, dict): placeholder = {} for key in list(shape.keys()): placeholder[key] = array_ops.placeholder( dtypes.as_dtype(dtype[key]), [None] + shape[key][1:], name=name_prepend + '_' + key) else: placeholder = array_ops.placeholder( dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend) return placeholder self._input_placeholder = get_placeholder(self.input_shape, self._input_dtype, 'input') self._output_placeholder = get_placeholder(self.output_shape, self._output_dtype, 'output') return self._input_placeholder, self._output_placeholder def set_placeholders(self, input_placeholder, output_placeholder): """Sets placeholders for this data feeder. Args: input_placeholder: Placeholder for `x` variable. Should match shape of the examples in the x dataset. output_placeholder: Placeholder for `y` variable. Should match shape of the examples in the y dataset. Can be `None`. """ self._input_placeholder = input_placeholder self._output_placeholder = output_placeholder def get_feed_params(self): """Function returns a `dict` with data feed params while training. Returns: A `dict` with data feed params while training. """ return { 'epoch': self.epoch, 'offset': self.offset, 'batch_size': self._batch_size } def get_feed_dict_fn(self): """Returns a function that samples data into given placeholders. Returns: A function that when called samples a random subset of batch size from `x` and `y`. """ x_is_dict, y_is_dict = isinstance( self._x, dict), self._y is not None and isinstance(self._y, dict) # Assign input features from random indices. 
def extract(data, indices): return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if len(data.shape) == 1 else _access(data, indices)) # assign labels from random indices def assign_label(data, shape, dtype, n_classes, indices): shape[0] = indices.shape[0] out = np.zeros(shape, dtype=dtype) for i in xrange(out.shape[0]): sample = indices[i] # self.n_classes is None means we're passing in raw target indices if n_classes is None: out[i] = _access(data, sample) else: if n_classes > 1: if len(shape) == 2: out.itemset((i, int(_access(data, sample))), 1.0) else: for idx, value in enumerate(_access(data, sample)): out.itemset(tuple([i, idx, value]), 1.0) else: out[i] = _access(data, sample) return out def _feed_dict_fn(): """Function that samples data into given placeholders.""" if self.max_epochs is not None and self.epoch + 1 > self.max_epochs: raise StopIteration assert self._input_placeholder is not None feed_dict = {} if self._epoch_placeholder is not None: feed_dict[self._epoch_placeholder.name] = [self.epoch] # Take next batch of indices. 
x_len = list(self._x.values())[0].shape[ 0] if x_is_dict else self._x.shape[0] end = min(x_len, self.offset + self._batch_size) batch_indices = self.indices[self.offset:end] # adding input placeholder feed_dict.update( dict([(self._input_placeholder[k].name, extract(v, batch_indices)) for k, v in list(self._x.items())]) if x_is_dict else {self._input_placeholder.name: extract(self._x, batch_indices)}) # move offset and reset it if necessary self.offset += self._batch_size if self.offset >= x_len: self.indices = self.random_state.permutation( x_len) if self._shuffle else np.array(range(x_len)) self.offset = 0 self.epoch += 1 # return early if there are no labels if self._output_placeholder is None: return feed_dict # adding output placeholders if y_is_dict: for k, v in list(self._y.items()): n_classes = (self.n_classes[k] if k in self.n_classes else None) if self.n_classes is not None else None shape, dtype = self.output_shape[k], self._output_dtype[k] feed_dict.update({ self._output_placeholder[k].name: assign_label(v, shape, dtype, n_classes, batch_indices) }) else: shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes feed_dict.update({ self._output_placeholder.name: assign_label(self._y, shape, dtype, n_classes, batch_indices) }) return feed_dict return _feed_dict_fn class StreamingDataFeeder(DataFeeder): """Data feeder for TF trainer that reads data from iterator. Streaming data feeder allows to read data as it comes it from disk or somewhere else. It's custom to have this iterators rotate infinetly over the dataset, to allow control of how much to learn on the trainer side. """ def __init__(self, x, y, n_classes, batch_size): """Initializes a StreamingDataFeeder instance. Args: x: iterator each element of which returns one feature sample. Sample can be a Nd numpy matrix or dictionary of Nd numpy matrices. y: iterator each element of which returns one label sample. 
Sample can be a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many classes regression values. n_classes: indicator of how many classes the corresponding label sample has for the purposes of one-hot conversion of label. In case where `y` is a dictionary, `n_classes` must be dictionary (with same keys as `y`) of how many classes there are in each label in `y`. If key is present in `y` and missing in `n_classes`, the value is assumed `None` and no one-hot conversion will be applied to the label with that key. batch_size: Mini batch size to accumulate samples in one batch. If set `None`, then assumes that iterator to return already batched element. Attributes: x: input features (or dictionary of input features). y: input label (or dictionary of output features). n_classes: number of classes. batch_size: mini batch size to accumulate. input_shape: shape of the input (can be dictionary depending on `x`). output_shape: shape of the output (can be dictionary depending on `y`). input_dtype: dtype of input (can be dictionary depending on `x`). output_dtype: dtype of output (can be dictionary depending on `y`). 
""" # pylint: disable=invalid-name,super-init-not-called x_first_el = six.next(x) self._x = itertools.chain([x_first_el], x) if y is not None: y_first_el = six.next(y) self._y = itertools.chain([y_first_el], y) else: y_first_el = None self._y = None self.n_classes = n_classes x_is_dict = isinstance(x_first_el, dict) y_is_dict = y is not None and isinstance(y_first_el, dict) if y_is_dict and n_classes is not None: assert isinstance(n_classes, dict) # extract shapes for first_elements if x_is_dict: x_first_el_shape = dict( [(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())]) else: x_first_el_shape = [1] + list(x_first_el.shape) if y_is_dict: y_first_el_shape = dict( [(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())]) elif y is None: y_first_el_shape = None else: y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance( y_first_el, list) else y_first_el.shape)) self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape( x_first_el_shape, y_first_el_shape, n_classes, batch_size) # Input dtype of x_first_el. if x_is_dict: self._input_dtype = dict( [(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())]) else: self._input_dtype = _check_dtype(x_first_el.dtype) # Output dtype of y_first_el. def check_y_dtype(el): if isinstance(el, np.ndarray): return el.dtype elif isinstance(el, list): return check_y_dtype(el[0]) else: return _check_dtype(np.dtype(type(el))) # Output types are floats, due to both softmaxes and regression req. if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0: self._output_dtype = np.float32 elif y_is_dict: self._output_dtype = dict( [(k, check_y_dtype(v)) for k, v in list(y_first_el.items())]) elif y is None: self._output_dtype = None else: self._output_dtype = check_y_dtype(y_first_el) def get_feed_params(self): """Function returns a `dict` with data feed params while training. Returns: A `dict` with data feed params while training. 
""" return {'batch_size': self._batch_size} def get_feed_dict_fn(self): """Returns a function, that will sample data and provide it to placeholders. Returns: A function that when called samples a random subset of batch size from x and y. """ self.stopped = False def _feed_dict_fn(): """Samples data and provides it to placeholders. Returns: `dict` of input and output tensors. """ def init_array(shape, dtype): """Initialize array of given shape or dict of shapes and dtype.""" if shape is None: return None elif isinstance(shape, dict): return dict([(k, np.zeros(shape[k], dtype[k])) for k in list(shape.keys())]) else: return np.zeros(shape, dtype=dtype) def put_data_array(dest, index, source=None, n_classes=None): """Puts data array into container.""" if source is None: dest = dest[:index] elif n_classes is not None and n_classes > 1: if len(self.output_shape) == 2: dest.itemset((index, source), 1.0) else: for idx, value in enumerate(source): dest.itemset(tuple([index, idx, value]), 1.0) else: if len(dest.shape) > 1: dest[index, :] = source else: dest[index] = source[0] if isinstance(source, list) else source return dest def put_data_array_or_dict(holder, index, data=None, n_classes=None): """Puts data array or data dictionary into container.""" if holder is None: return None if isinstance(holder, dict): if data is None: data = {k: None for k in holder.keys()} assert isinstance(data, dict) for k in holder.keys(): num_classes = n_classes[k] if (n_classes is not None and k in n_classes) else None holder[k] = put_data_array(holder[k], index, data[k], num_classes) else: holder = put_data_array(holder, index, data, n_classes) return holder if self.stopped: raise StopIteration inp = init_array(self.input_shape, self._input_dtype) out = init_array(self.output_shape, self._output_dtype) for i in xrange(self._batch_size): # Add handling when queue ends. 
try: next_inp = six.next(self._x) inp = put_data_array_or_dict(inp, i, next_inp, None) except StopIteration: self.stopped = True if i == 0: raise inp = put_data_array_or_dict(inp, i, None, None) out = put_data_array_or_dict(out, i, None, None) break if self._y is not None: next_out = six.next(self._y) out = put_data_array_or_dict(out, i, next_out, self.n_classes) # creating feed_dict if isinstance(inp, dict): feed_dict = dict([(self._input_placeholder[k].name, inp[k]) for k in list(self._input_placeholder.keys())]) else: feed_dict = {self._input_placeholder.name: inp} if self._y is not None: if isinstance(out, dict): feed_dict.update( dict([(self._output_placeholder[k].name, out[k]) for k in list(self._output_placeholder.keys())])) else: feed_dict.update({self._output_placeholder.name: out}) return feed_dict return _feed_dict_fn class DaskDataFeeder(object): """Data feeder for that reads data from dask.Series and dask.DataFrame. Numpy arrays can be serialized to disk and it's possible to do random seeks into them. DaskDataFeeder will remove requirement to have full dataset in the memory and still do random seeks for sampling of batches. """ def __init__(self, x, y, n_classes, batch_size, shuffle=True, random_state=None, epochs=None): """Initializes a DaskDataFeeder instance. Args: x: iterator that returns for each element, returns features. y: iterator that returns for each element, returns 1 or many classes / regression values. n_classes: indicator of how many classes the label has. batch_size: Mini batch size to accumulate. shuffle: Whether to shuffle the inputs. random_state: random state for RNG. Note that it will mutate so use a int value for this if you want consistent sized batches. epochs: Number of epochs to run. Attributes: x: input features. y: input label. n_classes: number of classes. batch_size: mini batch size to accumulate. input_shape: shape of the input. output_shape: shape of the output. input_dtype: dtype of input. output_dtype: dtype of output. 
Raises: ValueError: if `x` or `y` are `dict`, as they are not supported currently. """ if isinstance(x, dict) or isinstance(y, dict): raise ValueError( 'DaskDataFeeder does not support dictionaries at the moment.') # pylint: disable=invalid-name,super-init-not-called import dask.dataframe as dd # pylint: disable=g-import-not-at-top # TODO(terrytangyuan): check x and y dtypes in dask_io like pandas self._x = x self._y = y # save column names self._x_columns = list(x.columns) if isinstance(y.columns[0], str): self._y_columns = list(y.columns) else: # deal with cases where two DFs have overlapped default numeric colnames self._y_columns = len(self._x_columns) + 1 self._y = self._y.rename(columns={y.columns[0]: self._y_columns}) # TODO(terrytangyuan): deal with unsupervised cases # combine into a data frame self.df = dd.multi.concat([self._x, self._y], axis=1) self.n_classes = n_classes x_count = x.count().compute()[0] x_shape = (x_count, len(self._x.columns)) y_shape = (x_count, len(self._y.columns)) # TODO(terrytangyuan): Add support for shuffle and epochs. self._shuffle = shuffle self.epochs = epochs self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape( x_shape, y_shape, n_classes, batch_size) self.sample_fraction = self._batch_size / float(x_count) self._input_dtype = _check_dtype(self._x.dtypes[0]) self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns]) if random_state is None: self.random_state = 66 else: self.random_state = random_state def get_feed_params(self): """Function returns a `dict` with data feed params while training. Returns: A `dict` with data feed params while training. """ return {'batch_size': self._batch_size} def get_feed_dict_fn(self, input_placeholder, output_placeholder): """Returns a function, that will sample data and provide it to placeholders. Args: input_placeholder: tf.placeholder for input features mini batch. output_placeholder: tf.placeholder for output labels. 
Returns: A function that when called samples a random subset of batch size from x and y. """ def _feed_dict_fn(): """Samples data and provides it to placeholders.""" # TODO(ipolosukhin): option for with/without replacement (dev version of # dask) sample = self.df.random_split( [self.sample_fraction, 1 - self.sample_fraction], random_state=self.random_state) inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist() out = extract_pandas_matrix(sample[0][self._y_columns].compute()) # convert to correct dtype inp = np.array(inp, dtype=self._input_dtype) # one-hot encode out for each class for cross entropy loss if HAS_PANDAS: import pandas as pd # pylint: disable=g-import-not-at-top if not isinstance(out, pd.Series): out = out.flatten() out_max = self._y.max().compute().values[0] encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype) encoded_out[np.arange(out.size), out] = 1 return {input_placeholder.name: inp, output_placeholder.name: encoded_out} return _feed_dict_fn
apache-2.0
wooga/airflow
airflow/contrib/hooks/sagemaker_hook.py
5
1267
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.sagemaker`.""" import warnings # pylint: disable=unused-import from airflow.providers.amazon.aws.hooks.sagemaker import ( # noqa LogState, Position, SageMakerHook, argmin, secondary_training_status_changed, secondary_training_status_message, ) warnings.warn( "This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.sagemaker`.", DeprecationWarning, stacklevel=2 )
apache-2.0
nicolasnoble/grpc
src/python/grpcio_tests/tests/unit/resources.py
18
3134
# Copyright 2015 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Constants and functions for data used in testing.""" import os import pkgutil _ROOT_CERTIFICATES_RESOURCE_PATH = 'credentials/ca.pem' _PRIVATE_KEY_RESOURCE_PATH = 'credentials/server1.key' _CERTIFICATE_CHAIN_RESOURCE_PATH = 'credentials/server1.pem' def test_root_certificates(): return pkgutil.get_data(__name__, _ROOT_CERTIFICATES_RESOURCE_PATH) def private_key(): return pkgutil.get_data(__name__, _PRIVATE_KEY_RESOURCE_PATH) def certificate_chain(): return pkgutil.get_data(__name__, _CERTIFICATE_CHAIN_RESOURCE_PATH) def cert_hier_1_root_ca_cert(): return pkgutil.get_data( __name__, 'credentials/certificate_hierarchy_1/certs/ca.cert.pem') def cert_hier_1_intermediate_ca_cert(): return pkgutil.get_data( __name__, 'credentials/certificate_hierarchy_1/intermediate/certs/intermediate.cert.pem' ) def cert_hier_1_client_1_key(): return pkgutil.get_data( __name__, 'credentials/certificate_hierarchy_1/intermediate/private/client.key.pem' ) def cert_hier_1_client_1_cert(): return pkgutil.get_data( __name__, 'credentials/certificate_hierarchy_1/intermediate/certs/client.cert.pem' ) def cert_hier_1_server_1_key(): return pkgutil.get_data( __name__, 'credentials/certificate_hierarchy_1/intermediate/private/localhost-1.key.pem' ) def cert_hier_1_server_1_cert(): return pkgutil.get_data( __name__, 'credentials/certificate_hierarchy_1/intermediate/certs/localhost-1.cert.pem' ) def cert_hier_2_root_ca_cert(): return 
pkgutil.get_data( __name__, 'credentials/certificate_hierarchy_2/certs/ca.cert.pem') def cert_hier_2_intermediate_ca_cert(): return pkgutil.get_data( __name__, 'credentials/certificate_hierarchy_2/intermediate/certs/intermediate.cert.pem' ) def cert_hier_2_client_1_key(): return pkgutil.get_data( __name__, 'credentials/certificate_hierarchy_2/intermediate/private/client.key.pem' ) def cert_hier_2_client_1_cert(): return pkgutil.get_data( __name__, 'credentials/certificate_hierarchy_2/intermediate/certs/client.cert.pem' ) def cert_hier_2_server_1_key(): return pkgutil.get_data( __name__, 'credentials/certificate_hierarchy_2/intermediate/private/localhost-1.key.pem' ) def cert_hier_2_server_1_cert(): return pkgutil.get_data( __name__, 'credentials/certificate_hierarchy_2/intermediate/certs/localhost-1.cert.pem' )
apache-2.0
e-gob/plataforma-kioscos-autoatencion
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/plugins/action/dellos9.py
2
4467
# # (c) 2016 Red Hat Inc. # # Copyright (c) 2017 Dell Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type import sys import copy from ansible import constants as C from ansible.plugins.action.normal import ActionModule as _ActionModule from ansible.module_utils.six import iteritems from ansible.module_utils.dellos9 import dellos9_argument_spec from ansible.module_utils.basic import AnsibleFallbackNotFound try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class ActionModule(_ActionModule): def run(self, tmp=None, task_vars=None): if self._play_context.connection != 'local': return dict( failed=True, msg='invalid connection specified, expected connection=local, ' 'got %s' % self._play_context.connection ) provider = self.load_provider() pc = copy.deepcopy(self._play_context) pc.connection = 'network_cli' pc.network_os = 'dellos9' pc.remote_addr = provider['host'] or self._play_context.remote_addr pc.port = int(provider['port'] or self._play_context.port or 22) pc.remote_user = provider['username'] or self._play_context.connection_user pc.password = provider['password'] or self._play_context.password pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file pc.timeout = int(provider['timeout'] 
or C.PERSISTENT_COMMAND_TIMEOUT) pc.become = provider['authorize'] or False pc.become_pass = provider['auth_pass'] display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr) connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin) socket_path = connection.run() display.vvvv('socket_path: %s' % socket_path, pc.remote_addr) if not socket_path: return {'failed': True, 'msg': 'unable to open shell. Please see: ' + 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'} # make sure we are in the right cli context which should be # enable mode and not config module rc, out, err = connection.exec_command('prompt()') while str(out).strip().endswith(')#'): display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr) connection.exec_command('exit') rc, out, err = connection.exec_command('prompt()') task_vars['ansible_socket'] = socket_path if self._play_context.become_method == 'enable': self._play_context.become = False self._play_context.become_method = None result = super(ActionModule, self).run(tmp, task_vars) return result def load_provider(self): provider = self._task.args.get('provider', {}) for key, value in iteritems(dellos9_argument_spec): if key != 'provider' and key not in provider: if key in self._task.args: provider[key] = self._task.args[key] elif 'fallback' in value: provider[key] = self._fallback(value['fallback']) elif key not in provider: provider[key] = None return provider def _fallback(self, fallback): strategy = fallback[0] args = [] kwargs = {} for item in fallback[1:]: if isinstance(item, dict): kwargs = item else: args = item try: return strategy(*args, **kwargs) except AnsibleFallbackNotFound: pass
bsd-3-clause
lexus42/w17
static/Brython3.1.1-20150328-091302/Lib/types.py
756
3167
""" Define names for built-in types that aren't directly accessible as a builtin. """ import sys # Iterators in Python aren't a matter of type but of protocol. A large # and changing number of builtin types implement *some* flavor of # iterator. Don't check the type! Use hasattr to check for both # "__iter__" and "__next__" attributes instead. def _f(): pass FunctionType = type(_f) LambdaType = type(lambda: None) # Same as FunctionType CodeType = type(_f.__code__) MappingProxyType = type(type.__dict__) SimpleNamespace = type(sys.implementation) def _g(): yield 1 GeneratorType = type(_g()) class _C: def _m(self): pass MethodType = type(_C()._m) BuiltinFunctionType = type(len) BuiltinMethodType = type([].append) # Same as BuiltinFunctionType ModuleType = type(sys) try: raise TypeError except TypeError: tb = sys.exc_info()[2] TracebackType = type(tb) FrameType = type(tb.tb_frame) tb = None; del tb # For Jython, the following two types are identical GetSetDescriptorType = type(FunctionType.__code__) MemberDescriptorType = type(FunctionType.__globals__) del sys, _f, _g, _C, # Not for export # Provide a PEP 3115 compliant mechanism for class creation def new_class(name, bases=(), kwds=None, exec_body=None): """Create a class object dynamically using the appropriate metaclass.""" meta, ns, kwds = prepare_class(name, bases, kwds) if exec_body is not None: exec_body(ns) return meta(name, bases, ns, **kwds) def prepare_class(name, bases=(), kwds=None): """Call the __prepare__ method of the appropriate metaclass. Returns (metaclass, namespace, kwds) as a 3-tuple *metaclass* is the appropriate metaclass *namespace* is the prepared class namespace *kwds* is an updated copy of the passed in kwds argument with any 'metaclass' entry removed. If no kwds argument is passed in, this will be an empty dict. 
""" if kwds is None: kwds = {} else: kwds = dict(kwds) # Don't alter the provided mapping if 'metaclass' in kwds: meta = kwds.pop('metaclass') else: if bases: meta = type(bases[0]) else: meta = type if isinstance(meta, type): # when meta is a type, we first determine the most-derived metaclass # instead of invoking the initial candidate directly meta = _calculate_meta(meta, bases) if hasattr(meta, '__prepare__'): ns = meta.__prepare__(name, bases, **kwds) else: ns = {} return meta, ns, kwds def _calculate_meta(meta, bases): """Calculate the most derived metaclass.""" winner = meta for base in bases: base_meta = type(base) if issubclass(winner, base_meta): continue if issubclass(base_meta, winner): winner = base_meta continue # else: raise TypeError("metaclass conflict: " "the metaclass of a derived class " "must be a (non-strict) subclass " "of the metaclasses of all its bases") return winner
gpl-3.0
numenta/nupic
src/nupic/algorithms/anomaly.py
11
7246
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2014-2016, Numenta, Inc. Unless you have purchased from # Numenta, Inc. a separate commercial license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """Anomaly-related algorithms.""" import numpy from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood from nupic.utils import MovingAverage def computeRawAnomalyScore(activeColumns, prevPredictedColumns): """Computes the raw anomaly score. The raw anomaly score is the fraction of active columns not predicted. :param activeColumns: array of active column indices :param prevPredictedColumns: array of columns indices predicted in prev step :returns: anomaly score 0..1 (float) """ nActiveColumns = len(activeColumns) if nActiveColumns > 0: # Test whether each element of a 1-D array is also present in a second # array. Sum to get the total # of columns that are active and were # predicted. score = numpy.in1d(activeColumns, prevPredictedColumns).sum() # Get the percent of active columns that were NOT predicted, that is # our anomaly score. score = (nActiveColumns - score) / float(nActiveColumns) else: # There are no active columns. 
score = 0.0 return score class Anomaly(object): """Utility class for generating anomaly scores in different ways. :param slidingWindowSize: [optional] - how many elements are summed up; enables moving average on final anomaly score; int >= 0 :param mode: (string) [optional] how to compute anomaly, one of: - :const:`nupic.algorithms.anomaly.Anomaly.MODE_PURE` - :const:`nupic.algorithms.anomaly.Anomaly.MODE_LIKELIHOOD` - :const:`nupic.algorithms.anomaly.Anomaly.MODE_WEIGHTED` :param binaryAnomalyThreshold: [optional] if set [0,1] anomaly score will be discretized to 1/0 (1 if >= binaryAnomalyThreshold) The transformation is applied after moving average is computed. """ # anomaly modes supported MODE_PURE = "pure" """ Default mode. The raw anomaly score as computed by :func:`~.anomaly_likelihood.computeRawAnomalyScore` """ MODE_LIKELIHOOD = "likelihood" """ Uses the :class:`~.anomaly_likelihood.AnomalyLikelihood` class, which models probability of receiving this value and anomalyScore """ MODE_WEIGHTED = "weighted" """ Multiplies the likelihood result with the raw anomaly score that was used to generate the likelihood (anomaly * likelihood) """ _supportedModes = (MODE_PURE, MODE_LIKELIHOOD, MODE_WEIGHTED) def __init__(self, slidingWindowSize=None, mode=MODE_PURE, binaryAnomalyThreshold=None): self._mode = mode if slidingWindowSize is not None: self._movingAverage = MovingAverage(windowSize=slidingWindowSize) else: self._movingAverage = None if (self._mode == Anomaly.MODE_LIKELIHOOD or self._mode == Anomaly.MODE_WEIGHTED): self._likelihood = AnomalyLikelihood() # probabilistic anomaly else: self._likelihood = None if not self._mode in self._supportedModes: raise ValueError("Invalid anomaly mode; only supported modes are: " "Anomaly.MODE_PURE, Anomaly.MODE_LIKELIHOOD, " "Anomaly.MODE_WEIGHTED; you used: %r" % self._mode) self._binaryThreshold = binaryAnomalyThreshold if binaryAnomalyThreshold is not None and ( not isinstance(binaryAnomalyThreshold, float) or 
binaryAnomalyThreshold >= 1.0 or binaryAnomalyThreshold <= 0.0 ): raise ValueError("Anomaly: binaryAnomalyThreshold must be from (0,1) " "or None if disabled.") def compute(self, activeColumns, predictedColumns, inputValue=None, timestamp=None): """Compute the anomaly score as the percent of active columns not predicted. :param activeColumns: array of active column indices :param predictedColumns: array of columns indices predicted in this step (used for anomaly in step T+1) :param inputValue: (optional) value of current input to encoders (eg "cat" for category encoder) (used in anomaly-likelihood) :param timestamp: (optional) date timestamp when the sample occured (used in anomaly-likelihood) :returns: the computed anomaly score; float 0..1 """ # Start by computing the raw anomaly score. anomalyScore = computeRawAnomalyScore(activeColumns, predictedColumns) # Compute final anomaly based on selected mode. if self._mode == Anomaly.MODE_PURE: score = anomalyScore elif self._mode == Anomaly.MODE_LIKELIHOOD: if inputValue is None: raise ValueError("Selected anomaly mode 'Anomaly.MODE_LIKELIHOOD' " "requires 'inputValue' as parameter to compute() method. ") probability = self._likelihood.anomalyProbability( inputValue, anomalyScore, timestamp) # low likelihood -> hi anomaly score = 1 - probability elif self._mode == Anomaly.MODE_WEIGHTED: probability = self._likelihood.anomalyProbability( inputValue, anomalyScore, timestamp) score = anomalyScore * (1 - probability) # Last, do moving-average if windowSize was specified. 
if self._movingAverage is not None: score = self._movingAverage.next(score) # apply binary discretization if required if self._binaryThreshold is not None: if score >= self._binaryThreshold: score = 1.0 else: score = 0.0 return score def __str__(self): windowSize = 0 if self._movingAverage is not None: windowSize = self._movingAverage.windowSize return "Anomaly:\tmode=%s\twindowSize=%r" % (self._mode, windowSize) def __eq__(self, other): return (isinstance(other, Anomaly) and other._mode == self._mode and other._binaryThreshold == self._binaryThreshold and other._movingAverage == self._movingAverage and other._likelihood == self._likelihood) def __setstate__(self, state): """deserialization""" self.__dict__.update(state) if not hasattr(self, '_mode'): self._mode = Anomaly.MODE_PURE if not hasattr(self, '_movingAverage'): self._movingAverage = None if not hasattr(self, '_binaryThreshold'): self._binaryThreshold = None
agpl-3.0
digitvaran/digitvaran
attendance_proxy/functions.py
1
2810
from pyvirtualdisplay import Display
from selenium.webdriver.support.ui import Select
from selenium import webdriver
from bs4 import BeautifulSoup
import time

keys = webdriver.common.keys.Keys


def scroll_element_into_view(driver, element):
    """Scroll *element* vertically into view in *driver*'s window."""
    y = element.location['y']
    driver.execute_script('window.scrollTo(0, {0})'.format(y))


def get_attendance(program, semester, month):
    """Scrape the attendance report for one program/semester/month.

    Drives a headless Firefox against sscattendance.formistry.com, selects
    the requested options, extracts the report table and parses it.

    :param program: numeric program id used as the <select> option value
    :param semester: numeric semester used as the <select> option value
    :param month: numeric month; 0 means "all months"
    :returns: tuple ``(students, header)`` where ``students`` is a list of
        rows (student name followed by 'attended/delivered' strings) and
        ``header`` is the matching list of column titles.
    """
    # Headless X display for environments without a screen
    # (e.g. pythonanywhere.com).
    display = Display(visible=0, size=(800, 600))
    display.start()
    browser = None
    try:
        # Firefox can fail to start transiently; retry a few times.
        # (Original code used `time.sleep` without importing `time`.)
        for _attempt in range(3):
            try:
                browser = webdriver.Firefox()
                break
            except Exception:
                time.sleep(3)
        if browser is None:
            # Original code fell through to a NameError here; fail loudly.
            raise RuntimeError('could not start Firefox after 3 attempts')
        browser.implicitly_wait(5)
        browser.get('http://sscattendance.formistry.com/report/')
        prog = Select(browser.find_element_by_id('program'))
        sem = Select(browser.find_element_by_id('semester'))
        mth = Select(browser.find_element_by_id('month'))
        # Select the requested report options; month 0 means "all".
        prog.select_by_value(str(program))
        sem.select_by_value(str(semester))
        if month == 0:
            month = 'all'
        mth.select_by_value(str(month))
        # Show 100 rows per page so the whole report sits in one table.
        pagesize = browser.find_element_by_id('pagesize')
        scroll_element_into_view(browser, pagesize)
        pagesize.send_keys('100\n')
        report = browser.find_element_by_id('report_block')
        text = report.get_attribute('innerHTML')
    finally:
        # Always release the browser and the virtual display,
        # even when scraping fails part-way.
        if browser is not None:
            browser.quit()
        display.stop()

    # Explicit parser avoids bs4's "no parser specified" warning and keeps
    # behaviour stable across installations.
    soup = BeautifulSoup(text, 'html.parser')
    # Column titles for the subjects.
    header_row = soup.find("table", {"id": "table-header"}).contents[0]
    heads = [cell.text for cell in header_row.findAll('th')
             if 'tbl_heading' in cell.get('class', '')]
    # First head is the student-name column, second is cosmetic padding.
    header = heads[2:]

    # Attendance data rows.
    tbody = soup.find('table', {'id': 'tableid'}).contents[1]
    students = []
    for row in tbody.findAll('tr'):  # one row per student
        columns = [cell for cell in row.findAll('td')]
        student_name = columns[0].text
        columns = columns[2:]  # keep only the attendance cells
        data = [student_name]
        for i in range(len(header)):
            # Each subject occupies six consecutive cells: lecture
            # delivered/attended, tutorial d/a, practical d/a.
            # NOTE(review): the original indexed columns[i]..columns[i+5],
            # which overlaps between consecutive subjects; a stride of 6
            # matches the six-cells-per-subject layout — confirm against
            # the live report.
            base = 6 * i
            ld, la = columns[base].text, columns[base + 1].text
            td, ta = columns[base + 2].text, columns[base + 3].text
            pd, pa = columns[base + 4].text, columns[base + 5].text
            data.append(str(la) + '/' + str(ld))
            data.append(str(ta) + '/' + str(td))
            data.append(str(pa) + '/' + str(pd))
        students.append(data)
    header.insert(0, 'Student Name')
    return students, header


if __name__ == '__main__':
    # get_attendance returns (rows, header), not a dict — the original
    # demo called data.keys() on the tuple and crashed.
    students, header = get_attendance(33, 1, 1)
    print(header)
    for row in students:
        print(row)
        print('-' * 100)
gpl-2.0
stupidnetizen/selenium
py/test/selenium/webdriver/common/element_attribute_tests.py
65
12160
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import unittest import pytest class ElementAttributeTests(unittest.TestCase): def testShouldReturnNullWhenGettingTheValueOfAnAttributeThatIsNotListed(self): self._loadSimplePage() head = self.driver.find_element_by_xpath("/html") attribute = head.get_attribute("cheese") self.assertTrue(attribute is None) def testShouldReturnNullWhenGettingSrcAttributeOfInvalidImgTag(self): self._loadSimplePage() img = self.driver.find_element_by_id("invalidImgTag") img_attr = img.get_attribute("src") self.assertTrue(img_attr is None) def testShouldReturnAnAbsoluteUrlWhenGettingSrcAttributeOfAValidImgTag(self): self._loadSimplePage() img = self.driver.find_element_by_id("validImgTag") img_attr = img.get_attribute("src") self.assertTrue("icon.gif" in img_attr) def testShouldReturnAnAbsoluteUrlWhenGettingHrefAttributeOfAValidAnchorTag(self): self._loadSimplePage() img = self.driver.find_element_by_id("validAnchorTag") img_attr = img.get_attribute("href") self.assertTrue("icon.gif" in img_attr) def testShouldReturnEmptyAttributeValuesWhenPresentAndTheValueIsActuallyEmpty(self): self._loadSimplePage() body = self.driver.find_element_by_xpath("//body") self.assertEqual("", body.get_attribute("style")) def 
testShouldReturnTheValueOfTheDisabledAttributeAsFalseIfNotSet(self): self._loadPage("formPage") inputElement = self.driver.find_element_by_xpath("//input[@id='working']") self.assertEqual(None, inputElement.get_attribute("disabled")) self.assertTrue(inputElement.is_enabled()) pElement = self.driver.find_element_by_id("peas") self.assertEqual(None, pElement.get_attribute("disabled")) self.assertTrue(pElement.is_enabled()) def testShouldReturnTheValueOfTheIndexAttrbuteEvenIfItIsMissing(self): self._loadPage("formPage") multiSelect = self.driver.find_element_by_id("multi") options = multiSelect.find_elements_by_tag_name("option") self.assertEqual("1", options[1].get_attribute("index")) def testShouldIndicateTheElementsThatAreDisabledAreNotis_enabled(self): self._loadPage("formPage") inputElement = self.driver.find_element_by_xpath("//input[@id='notWorking']") self.assertFalse(inputElement.is_enabled()) inputElement = self.driver.find_element_by_xpath("//input[@id='working']") self.assertTrue(inputElement.is_enabled()) def testElementsShouldBeDisabledIfTheyAreDisabledUsingRandomDisabledStrings(self): self._loadPage("formPage") disabledTextElement1 = self.driver.find_element_by_id("disabledTextElement1") self.assertFalse(disabledTextElement1.is_enabled()) disabledTextElement2 = self.driver.find_element_by_id("disabledTextElement2") self.assertFalse(disabledTextElement2.is_enabled()) disabledSubmitElement = self.driver.find_element_by_id("disabledSubmitElement") self.assertFalse(disabledSubmitElement.is_enabled()) def testShouldIndicateWhenATextAreaIsDisabled(self): self._loadPage("formPage") textArea = self.driver.find_element_by_xpath("//textarea[@id='notWorkingArea']") self.assertFalse(textArea.is_enabled()) def testShouldThrowExceptionIfSendingKeysToElementDisabledUsingRandomDisabledStrings(self): self._loadPage("formPage") disabledTextElement1 = self.driver.find_element_by_id("disabledTextElement1") try: disabledTextElement1.send_keys("foo") self.fail("Should have 
thrown exception") except: pass self.assertEqual("", disabledTextElement1.text) disabledTextElement2 = self.driver.find_element_by_id("disabledTextElement2") try: disabledTextElement2.send_keys("bar") self.fail("Should have thrown exception") except: pass self.assertEqual("", disabledTextElement2.text) def testShouldIndicateWhenASelectIsDisabled(self): self._loadPage("formPage") enabled = self.driver.find_element_by_name("selectomatic") disabled = self.driver.find_element_by_name("no-select") self.assertTrue(enabled.is_enabled()) self.assertFalse(disabled.is_enabled()) def testShouldReturnTheValueOfCheckedForACheckboxEvenIfItLacksThatAttribute(self): self._loadPage("formPage") checkbox = self.driver.find_element_by_xpath("//input[@id='checky']") self.assertTrue(checkbox.get_attribute("checked") is None) checkbox.click() self.assertEqual("true", checkbox.get_attribute("checked")) def testShouldReturnTheValueOfSelectedForRadioButtonsEvenIfTheyLackThatAttribute(self): self._loadPage("formPage") neverSelected = self.driver.find_element_by_id("cheese") initiallyNotSelected = self.driver.find_element_by_id("peas") initiallySelected = self.driver.find_element_by_id("cheese_and_peas") self.assertTrue(neverSelected.get_attribute("selected") is None, "false") self.assertTrue(initiallyNotSelected.get_attribute("selected") is None, "false") self.assertEqual("true", initiallySelected.get_attribute("selected"), "true") initiallyNotSelected.click() self.assertTrue(neverSelected.get_attribute("selected") is None) self.assertEqual("true", initiallyNotSelected.get_attribute("selected")) self.assertTrue(initiallySelected.get_attribute("selected") is None) def testShouldReturnTheValueOfSelectedForOptionsInSelectsEvenIfTheyLackThatAttribute(self): self._loadPage("formPage") selectBox = self.driver.find_element_by_xpath("//select[@name='selectomatic']") options = selectBox.find_elements_by_tag_name("option") one = options[0] two = options[1] self.assertTrue(one.is_selected()) 
self.assertFalse(two.is_selected()) self.assertEqual("true", one.get_attribute("selected")) self.assertTrue(two.get_attribute("selected") is None) def testShouldReturnValueOfClassAttributeOfAnElement(self): self._loadPage("xhtmlTest") heading = self.driver.find_element_by_xpath("//h1") classname = heading.get_attribute("class") self.assertEqual("header", classname) # Disabled due to issues with Frames #def testShouldReturnValueOfClassAttributeOfAnElementAfterSwitchingIFrame(self): # self._loadPage("iframes") # self.driver.switch_to.frame("iframe1") # # wallace = self.driver.find_element_by_xpath("//div[@id='wallace']") # classname = wallace.get_attribute("class") # self.assertEqual("gromit", classname) def testShouldReturnTheContentsOfATextAreaAsItsValue(self): self._loadPage("formPage") value = self.driver.find_element_by_id("withText").get_attribute("value") self.assertEqual("Example text", value) def testShouldReturnTheContentsOfATextAreaAsItsValueWhenSetToNonNorminalTrue(self): self._loadPage("formPage") e = self.driver.find_element_by_id("withText") self.driver.execute_script("arguments[0].value = 'tRuE'", e) value = e.get_attribute("value") self.assertEqual("tRuE", value) def testShouldTreatReadonlyAsAValue(self): self._loadPage("formPage") element = self.driver.find_element_by_name("readonly") readOnlyAttribute = element.get_attribute("readonly") textInput = self.driver.find_element_by_name("x") notReadOnly = textInput.get_attribute("readonly") self.assertNotEqual(readOnlyAttribute, notReadOnly) def testShouldGetNumericAtribute(self): self._loadPage("formPage") element = self.driver.find_element_by_id("withText") self.assertEqual("5", element.get_attribute("rows")) def testCanReturnATextApproximationOfTheStyleAttribute(self): self._loadPage("javascriptPage") style = self.driver.find_element_by_id("red-item").get_attribute("style") self.assertTrue("background-color" in style.lower()) def testShouldCorrectlyReportValueOfColspan(self): self._loadPage("tables") 
th1 = self.driver.find_element_by_id("th1") td2 = self.driver.find_element_by_id("td2") self.assertEqual("th1", th1.get_attribute("id")) self.assertEqual("3", th1.get_attribute("colspan")) self.assertEqual("td2", td2.get_attribute("id")); self.assertEquals("2", td2.get_attribute("colspan")); def testCanRetrieveTheCurrentValueOfATextFormField_textInput(self): self._loadPage("formPage") element = self.driver.find_element_by_id("working") self.assertEqual("", element.get_attribute("value")) element.send_keys("hello world") self.assertEqual("hello world", element.get_attribute("value")) def testCanRetrieveTheCurrentValueOfATextFormField_emailInput(self): self._loadPage("formPage") element = self.driver.find_element_by_id("email") self.assertEqual("", element.get_attribute("value")) element.send_keys("hello@example.com") self.assertEqual("hello@example.com", element.get_attribute("value")) def testCanRetrieveTheCurrentValueOfATextFormField_textArea(self): self._loadPage("formPage") element = self.driver.find_element_by_id("emptyTextArea") self.assertEqual("", element.get_attribute("value")) element.send_keys("hello world") self.assertEqual("hello world", element.get_attribute("value")) @pytest.mark.ignore_chrome def testShouldReturnNullForNonPresentBooleanAttributes(self): self._loadPage("booleanAttributes") element1 = self.driver.find_element_by_id("working") self.assertEqual(None, element1.get_attribute("required")) element2 = self.driver.find_element_by_id("wallace") self.assertEqual(None, element2.get_attribute("nowrap")) @pytest.mark.ignore_ie def testShouldReturnTrueForPresentBooleanAttributes(self): self._loadPage("booleanAttributes") element1 = self.driver.find_element_by_id("emailRequired") self.assertEqual("true", element1.get_attribute("required")) element2 = self.driver.find_element_by_id("emptyTextAreaRequired") self.assertEqual("true", element2.get_attribute("required")) element3 = self.driver.find_element_by_id("inputRequired") self.assertEqual("true", 
element3.get_attribute("required")) element4 = self.driver.find_element_by_id("textAreaRequired") self.assertEqual("true", element4.get_attribute("required")) element5 = self.driver.find_element_by_id("unwrappable") self.assertEqual("true", element5.get_attribute("nowrap")) def tesShouldGetUnicodeCharsFromAttribute(self): self._loadPage("formPage") title = self.driver.find_element_by_id("vsearchGadget").get_attribute("title") self.assertEqual('Hvad s\xf8ger du?', title) def _pageURL(self, name): return self.webserver.where_is(name + '.html') def _loadSimplePage(self): self._loadPage("simpleTest") def _loadPage(self, name): self.driver.get(self._pageURL(name))
apache-2.0
analysiscenter/dataset
batchflow/opensets/imagenette.py
1
5401
""" Contains Imagenette and Imagewoof datasets """
import os
from os.path import dirname, basename
import tempfile
import logging
import urllib.request
import tarfile
from io import BytesIO

import PIL
import tqdm
import numpy as np
from sklearn.preprocessing import LabelEncoder

from . import ImagesOpenset

logger = logging.getLogger('SmallImagenet')


class Imagenette(ImagesOpenset):
    """ Imagenette dataset. Contains 12894 train and 500 test images. Total size 1.4GB.

    Notes
    -----
    - Datasets contain both grayscale and colored images, ratio ~ 1:100
      Argument `drop_grayscale` controls whether grayscale images should be dropped.
    """

    # Archive with the full-resolution images; subclasses override this
    # to point at smaller variants of the same layout.
    SOURCE_URL = 'https://s3.amazonaws.com/fast-ai-imageclas/imagenette.tgz'
    num_classes = 10

    def __init__(self, *args, drop_grayscale=True, bar=False, preloaded=None, train_test=True, **kwargs):
        # Two bar ticks: one for download, one for extraction (see download()).
        self.bar = tqdm.tqdm(total=2) if bar else None
        self.drop_grayscale = drop_grayscale
        # The parent __init__ triggers download() when no preloaded data is given.
        super().__init__(*args, preloaded=preloaded, train_test=train_test, **kwargs)
        if self.bar:
            self.bar.close()

    def download(self, path=None):
        """ Load data from website and extract it into numpy arrays

        :param path: directory to store the downloaded archive in;
            defaults to the system temp directory.
        :returns: ``(preloaded, index, train_index, test_index)`` where
            ``preloaded`` is an ``(images, labels)`` pair of numpy arrays.
        """
        def _image_class(filepath):
            """ Image's class is determined by the parent folder of the image """
            return basename(dirname(filepath))

        def _is_train(filepath):
            """ Whether image belongs to train or val parts can be determined by
            the level 2 parent folder of the image """
            return basename(dirname(dirname(filepath))) == 'train'

        def _extract(archive, member):
            # Decode a tar member into a PIL image without touching disk.
            data = archive.extractfile(member).read()
            return PIL.Image.open(BytesIO(data))

        def _is_file_rgb(archive, member):
            """ Check whether archive member is a file.
            In case `drop_grayscale` set to `True` it verifies that the member is the RGB mode image as well.
            """
            if not self.drop_grayscale:
                return member.isfile()

            # NOTE: this decodes the image just to inspect its mode,
            # so the archive is effectively read twice when filtering.
            return member.isfile() and _extract(archive, member).mode == 'RGB'

        def _gather_extracted(archive, files):
            # dtype=object because images may have differing spatial sizes.
            images = np.array([_extract(archive, file) for file in files], dtype=object)
            labels = np.array([_image_class(file.name) for file in files])
            # Map folder names to consecutive integer class ids.
            labels_encoded = LabelEncoder().fit_transform(labels)
            return images, labels_encoded

        if path is None:
            path = tempfile.gettempdir()
        filename = os.path.basename(self.SOURCE_URL)
        localname = os.path.join(path, filename)
        # Reuse a previously downloaded archive when present.
        if not os.path.isfile(localname):
            logger.info("Downloading %s", filename)
            urllib.request.urlretrieve(self.SOURCE_URL, localname)
            logger.info("Downloaded %s", filename)
        if self.bar:
            self.bar.update(1)

        logger.info("Extracting...")
        with tarfile.open(localname, "r:gz") as archive:
            files_in_archive = archive.getmembers()
            # train/ and val/ parts are distinguished by folder layout.
            train_files = [file for file in files_in_archive
                           if _is_file_rgb(archive, file) and _is_train(file.name)]
            train_data = _gather_extracted(archive, train_files)
            test_files = [file for file in files_in_archive
                          if _is_file_rgb(archive, file) and not _is_train(file.name)]
            test_data = _gather_extracted(archive, test_files)
        logger.info("Extracted")
        if self.bar:
            self.bar.update(1)

        # Concatenate so that train samples come first, then test samples;
        # the index split below relies on this ordering.
        images = np.concatenate([train_data[0], test_data[0]])
        labels = np.concatenate([train_data[1], test_data[1]])
        preloaded = images, labels

        train_len, test_len = len(train_data[0]), len(test_data[0])
        index, train_index, test_index = self._infer_train_test_index(train_len, test_len)

        return preloaded, index, train_index, test_index


class Imagenette320(Imagenette):
    """ The '320px' version of Imagenette. The shortest size resized to that size with
    their aspect ratio maintained. Contains 12894 train and 500 test images. Total size 325MB. """
    SOURCE_URL = 'https://s3.amazonaws.com/fast-ai-imageclas/imagenette-320.tgz'


class Imagenette160(Imagenette):
    """ The '160px' version of Imagenette. The shortest size resized to that size with
    their aspect ratio maintained. Contains 12894 train and 500 test images. Total size 98MB. """
    SOURCE_URL = 'https://s3.amazonaws.com/fast-ai-imageclas/imagenette-160.tgz'


class ImageWoof(Imagenette):
    """ Imagewoof dataset. See the https://github.com/fastai/imagenette for details.
    Contains 12454 train and 500 test images. Total size 1.3GB """
    SOURCE_URL = 'https://s3.amazonaws.com/fast-ai-imageclas/imagewoof.tgz'


class ImageWoof320(Imagenette):
    """ The '320px' version of Imagewoof. The shortest size resized to that size with
    their aspect ratio maintained. Contains 12454 train and 500 test images. Total size 313MB. """
    SOURCE_URL = 'https://s3.amazonaws.com/fast-ai-imageclas/imagewoof-320.tgz'


class ImageWoof160(Imagenette):
    """ The '160px' version of Imagewoof. The shortest size resized to that size with
    their aspect ratio maintained. Contains 12454 train and 500 test images. Total size 88MB """
    SOURCE_URL = 'https://s3.amazonaws.com/fast-ai-imageclas/imagewoof-160.tgz'
apache-2.0
NOAA-ORR-ERD/gridded
gridded/tests/test_pysgrid/test_read_netcdf.py
1
2349
"""
Tests for gridded.pysgrid.read_netcdf (NetCDFDataset / topology discovery).

Created on Apr 7, 2015

@author: ayan
"""

from __future__ import absolute_import, division, print_function

from gridded.pysgrid.read_netcdf import NetCDFDataset, find_grid_topology_var
# roms_sgrid and wrf_sgrid are pytest fixtures providing temporary
# SGRID-compliant netCDF datasets.
from .write_nc_test_files import roms_sgrid, wrf_sgrid


"""
Test NetCDF Dataset With Nodes.
"""


def test_finding_node_variables(roms_sgrid):
    # Node coordinate variables are resolved from the node dimension names.
    nc_ds = NetCDFDataset(roms_sgrid)
    result = nc_ds.find_node_coordinates('xi_psi eta_psi')
    expected = ('lon_psi', 'lat_psi')
    assert result == expected


def test_find_face_coordinates_by_location(roms_sgrid):
    # 'face' (cell-center) coordinates map to the rho-point variables.
    nc_ds = NetCDFDataset(roms_sgrid)
    result = nc_ds.find_coordinates_by_location('face', 2)
    expected = ('lon_rho', 'lat_rho')
    assert result == expected


def test_find_edge_coordinates_by_location(roms_sgrid):
    # 'edge1' coordinates map to the u-point variables.
    nc_ds = NetCDFDataset(roms_sgrid)
    result = nc_ds.find_coordinates_by_location('edge1', 2)
    expected = ('lon_u', 'lat_u')
    assert result == expected


def test_find_grid_topology(roms_sgrid):
    # The variable carrying cf_role='grid_topology' is named 'grid'.
    result = find_grid_topology_var(roms_sgrid)
    expected = 'grid'
    assert result == expected


def test_find_variables_by_standard_name(roms_sgrid):
    nc_ds = NetCDFDataset(roms_sgrid)
    result = nc_ds.find_variables_by_attr(standard_name='time')
    expected = ['time']
    assert result == expected


def test_find_variables_by_standard_name_none(roms_sgrid):
    # A non-matching standard_name yields an empty list, not an error.
    nc_ds = NetCDFDataset(roms_sgrid)
    result = nc_ds.find_variables_by_attr(standard_name='some standard_name')
    assert result == []


def test_sgrid_compliant_check(roms_sgrid):
    nc_ds = NetCDFDataset(roms_sgrid)
    result = nc_ds.sgrid_compliant_file()
    assert result


"""
Test NetCDF Dataset Without Nodes.
"""


def test_node_coordinates(wrf_sgrid):
    # The WRF test grid defines no node coordinates, so lookup returns None.
    nc_ds = NetCDFDataset(wrf_sgrid)
    node_coordinates = nc_ds.find_node_coordinates('west_east_stag south_north_stag')  # noqa
    assert node_coordinates is None


def test_find_variable_by_attr(wrf_sgrid):
    # Multiple attr filters are ANDed together.
    nc_ds = NetCDFDataset(wrf_sgrid)
    result = nc_ds.find_variables_by_attr(cf_role='grid_topology',
                                          topology_dimension=2)
    expected = ['grid']
    assert result == expected


def test_find_variable_by_nonexistant_attr(wrf_sgrid):
    # Unknown attribute names simply match nothing.
    nc_ds = NetCDFDataset(wrf_sgrid)
    result = nc_ds.find_variables_by_attr(bird='tufted titmouse')
    assert result == []
unlicense
oostende/openblachole
lib/python/Plugins/SystemPlugins/SoftwareManager/SoftwareTools.py
2
9282
# -*- coding: iso-8859-1 -*- from time import time from boxbranding import getImageVersion from enigma import eConsoleAppContainer from Components.Console import Console from Components.PackageInfo import PackageInfoHandler from Components.Language import language from Components.Sources.List import List from Components.Ipkg import IpkgComponent from Components.Network import iNetwork from Tools.Directories import resolveFilename, SCOPE_METADIR from boxbranding import getBoxType class SoftwareTools(PackageInfoHandler): lastDownloadDate = None NetworkConnectionAvailable = None list_updating = False available_updates = 0 available_updatelist = [] available_packetlist = [] installed_packetlist = {} def __init__(self): aboutInfo = getImageVersion() if aboutInfo.startswith("dev-"): self.ImageVersion = 'Experimental' else: self.ImageVersion = 'Stable' self.language = language.getLanguage()[:2] # getLanguage returns e.g. "fi_FI" for "language_country" PackageInfoHandler.__init__(self, self.statusCallback, neededTag = 'ALL_TAGS', neededFlag = self.ImageVersion) self.directory = resolveFilename(SCOPE_METADIR) self.list = List([]) self.NotifierCallback = None self.Console = Console() self.UpdateConsole = Console() self.cmdList = [] self.unwanted_extensions = ('-dbg', '-dev', '-doc', '-staticdev', '-src') self.ipkg = IpkgComponent() self.ipkg.addCallback(self.ipkgCallback) def statusCallback(self, status, progress): pass def startSoftwareTools(self, callback = None): if callback is not None: self.NotifierCallback = callback iNetwork.checkNetworkState(self.checkNetworkCB) def checkNetworkCB(self,data): if data is not None: if data <= 2: self.NetworkConnectionAvailable = True self.getUpdates() else: self.NetworkConnectionAvailable = False self.getUpdates() def getUpdates(self, callback = None): if self.lastDownloadDate is None: if self.NetworkConnectionAvailable: self.lastDownloadDate = time() if self.list_updating is False and callback is None: self.list_updating = True 
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE) elif self.list_updating is False and callback is not None: self.list_updating = True self.NotifierCallback = callback self.ipkg.startCmd(IpkgComponent.CMD_UPDATE) elif self.list_updating is True and callback is not None: self.NotifierCallback = callback else: self.list_updating = False if callback is not None: callback(False) elif self.NotifierCallback is not None: self.NotifierCallback(False) else: if self.NetworkConnectionAvailable: self.lastDownloadDate = time() if self.list_updating is False and callback is None: self.list_updating = True self.ipkg.startCmd(IpkgComponent.CMD_UPDATE) elif self.list_updating is False and callback is not None: self.list_updating = True self.NotifierCallback = callback self.ipkg.startCmd(IpkgComponent.CMD_UPDATE) elif self.list_updating is True and callback is not None: self.NotifierCallback = callback else: if self.list_updating and callback is not None: self.NotifierCallback = callback self.startIpkgListAvailable() else: self.list_updating = False if callback is not None: callback(False) elif self.NotifierCallback is not None: self.NotifierCallback(False) def ipkgCallback(self, event, param): if event == IpkgComponent.EVENT_ERROR: self.list_updating = False if self.NotifierCallback is not None: self.NotifierCallback(False) elif event == IpkgComponent.EVENT_DONE: if self.list_updating: self.startIpkgListAvailable() pass def startIpkgListAvailable(self, callback = None): if callback is not None: self.list_updating = True if self.list_updating: if not self.UpdateConsole: self.UpdateConsole = Console() cmd = self.ipkg.ipkg + " list" self.UpdateConsole.ePopen(cmd, self.IpkgListAvailableCB, callback) def IpkgListAvailableCB(self, result, retval, extra_args = None): (callback) = extra_args or None if result: if self.list_updating: self.available_packetlist = [] for x in result.splitlines(): tokens = x.split(' - ') name = tokens[0].strip() if not any(name.endswith(x) for x in 
self.unwanted_extensions): l = len(tokens) version = l > 1 and tokens[1].strip() or "" descr = l > 2 and tokens[2].strip() or "" self.available_packetlist.append([name, version, descr]) if callback is None: self.startInstallMetaPackage() else: if self.UpdateConsole: if len(self.UpdateConsole.appContainers) == 0: callback(True) else: self.list_updating = False if self.UpdateConsole: if len(self.UpdateConsole.appContainers) == 0: if callback is not None: callback(False) def startInstallMetaPackage(self, callback = None): if callback is not None: self.list_updating = True if self.list_updating: if self.NetworkConnectionAvailable: if not self.UpdateConsole: self.UpdateConsole = Console() cmd = self.ipkg.ipkg + " install enigma2-meta enigma2-plugins-meta enigma2-skins-meta" self.UpdateConsole.ePopen(cmd, self.InstallMetaPackageCB, callback) else: self.InstallMetaPackageCB(True) def InstallMetaPackageCB(self, result, retval = None, extra_args = None): (callback) = extra_args or None if result: self.fillPackagesIndexList() if callback is None: self.startIpkgListInstalled() else: if self.UpdateConsole: if len(self.UpdateConsole.appContainers) == 0: callback(True) else: self.list_updating = False if self.UpdateConsole: if len(self.UpdateConsole.appContainers) == 0: if callback is not None: callback(False) def startIpkgListInstalled(self, callback = None): if callback is not None: self.list_updating = True if self.list_updating: if not self.UpdateConsole: self.UpdateConsole = Console() cmd = self.ipkg.ipkg + " list_installed" self.UpdateConsole.ePopen(cmd, self.IpkgListInstalledCB, callback) def IpkgListInstalledCB(self, result, retval, extra_args = None): (callback) = extra_args or None if result: self.installed_packetlist = {} for x in result.splitlines(): tokens = x.split(' - ') name = tokens[0].strip() if not any(name.endswith(x) for x in self.unwanted_extensions): l = len(tokens) version = l > 1 and tokens[1].strip() or "" self.installed_packetlist[name] = version for 
package in self.packagesIndexlist[:]: if not self.verifyPrerequisites(package[0]["prerequisites"]): self.packagesIndexlist.remove(package) for package in self.packagesIndexlist[:]: attributes = package[0]["attributes"] if attributes.has_key("packagetype"): if attributes["packagetype"] == "internal": self.packagesIndexlist.remove(package) if callback is None: self.countUpdates() else: if self.UpdateConsole: if len(self.UpdateConsole.appContainers) == 0: callback(True) else: self.list_updating = False if self.UpdateConsole: if len(self.UpdateConsole.appContainers) == 0: if callback is not None: callback(False) def countUpdates(self, callback = None): self.available_updates = 0 self.available_updatelist = [] for package in self.packagesIndexlist[:]: attributes = package[0]["attributes"] packagename = attributes["packagename"] for x in self.available_packetlist: if x[0] == packagename: if self.installed_packetlist.has_key(packagename): if self.installed_packetlist[packagename] != x[1]: self.available_updates +=1 self.available_updatelist.append([packagename]) self.list_updating = False if self.UpdateConsole: if len(self.UpdateConsole.appContainers) == 0: if callback is not None: callback(True) callback = None elif self.NotifierCallback is not None: self.NotifierCallback(True) self.NotifierCallback = None def startIpkgUpdate(self, callback = None): if not self.Console: self.Console = Console() cmd = self.ipkg.ipkg + " update" self.Console.ePopen(cmd, self.IpkgUpdateCB, callback) def IpkgUpdateCB(self, result, retval, extra_args = None): (callback) = extra_args or None if result: if self.Console: if len(self.Console.appContainers) == 0: if callback is not None: callback(True) callback = None def cleanupSoftwareTools(self): self.list_updating = False if self.NotifierCallback is not None: self.NotifierCallback = None self.ipkg.stop() if self.Console is not None: if len(self.Console.appContainers): for name in self.Console.appContainers.keys(): self.Console.kill(name) if 
self.UpdateConsole is not None: if len(self.UpdateConsole.appContainers): for name in self.UpdateConsole.appContainers.keys(): self.UpdateConsole.kill(name) def verifyPrerequisites(self, prerequisites): if prerequisites.has_key("hardware"): hardware_found = False for hardware in prerequisites["hardware"]: if hardware == getBoxType(): hardware_found = True if not hardware_found: return False return True iSoftwareTools = SoftwareTools()
gpl-2.0
chokribr/inveniotest
modules/bibformat/lib/elements/bfe_arxiv_link.py
16
1791
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Links to arXiv"""

from cgi import escape

from invenio.messages import gettext_set_language


def format_element(bfo, tag="037__", target="_blank"):
    """
    Extract the arXiv preprint identifier and present it as a direct
    link towards arXiv.org.

    The first repeat of *tag* whose $9 subfield is 'arXiv' and whose $a
    subfield starts with 'arXiv:' wins; its identifier (without the
    'arXiv:' prefix) becomes both the link target and the link text.

    @param tag: MARC tag holding the report numbers
    @param target: value for the link's HTML target attribute
    """
    _ = gettext_set_language(bfo.lang)
    for field in bfo.fields(tag):
        if field.get('9') == 'arXiv' and field.get('a', '').startswith('arXiv:'):
            arxiv_id = field['a'][len('arXiv:'):]
            # 'title' (not 'alt', which is invalid on <a>) carries the
            # localized tooltip.
            return '<a href="http://arxiv.org/abs/%s" target="%s" title="%s">%s</a>' % (
                escape(arxiv_id, True),
                escape(target, True),
                escape(_("This article on arXiv.org"), True),
                escape(arxiv_id))
    return ""


def escape_values(bfo):
    """
    Called by BibFormat in order to check if output of this element
    should be escaped.

    Returns 0 because the element escapes its own output above.
    """
    return 0
gpl-2.0
v-iam/azure-sdk-for-python
azure-batch/azure/batch/models/pool_exists_options.py
3
3078
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class PoolExistsOptions(Model):
    """Additional parameters for the Pool_exists operation.

    :param timeout: Maximum time the server may spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity: a GUID
     with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should echo the
     client-request-id back in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: Time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if you are calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: ETag known to the client; the operation is performed
     only if the resource's current ETag on the service matches it exactly.
    :type if_match: str
    :param if_none_match: ETag known to the client; the operation is
     performed only if the resource's current ETag on the service does NOT
     match it.
    :type if_none_match: str
    :param if_modified_since: Last-modified timestamp known to the client;
     the operation is performed only if the resource on the service has
     been modified since that time.
    :type if_modified_since: datetime
    :param if_unmodified_since: Last-modified timestamp known to the
     client; the operation is performed only if the resource on the service
     has NOT been modified since that time.
    :type if_unmodified_since: datetime
    """

    def __init__(self, timeout=30, client_request_id=None,
                 return_client_request_id=False, ocp_date=None, if_match=None,
                 if_none_match=None, if_modified_since=None,
                 if_unmodified_since=None):
        # Plain attribute copies; (de)serialization is handled by the
        # msrest Model machinery.
        # -- request shaping --
        self.timeout = timeout
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date
        # -- conditional-execution headers --
        self.if_match = if_match
        self.if_none_match = if_none_match
        self.if_modified_since = if_modified_since
        self.if_unmodified_since = if_unmodified_since
mit
sunqm/pyscf
pyscf/cc/test/test_eom_rccsd.py
2
38148
#!/usr/bin/env python # Copyright 2014-2018 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import copy import numpy from functools import reduce from pyscf import lib from pyscf import gto from pyscf import scf from pyscf import cc from pyscf import ao2mo from pyscf.cc import ccsd, rccsd, eom_rccsd, rintermediates, gintermediates mol = gto.Mole() mol.atom = [ [8 , (0. , 0. , 0.)], [1 , (0. , -0.757 , 0.587)], [1 , (0. , 0.757 , 0.587)]] mol.basis = 'cc-pvdz' mol.verbose = 0 mol.spin = 0 mol.build() mf = scf.RHF(mol).run() mycc = rccsd.RCCSD(mf).run() def make_mycc1(): mf1 = copy.copy(mf) no = mol.nelectron // 2 n = mol.nao_nr() nv = n - no mf1.mo_occ = numpy.zeros(mol.nao_nr()) mf1.mo_occ[:no] = 2 numpy.random.seed(12) mf1.mo_coeff = numpy.random.random((n,n)) dm = mf1.make_rdm1(mf1.mo_coeff, mf1.mo_occ) fockao = mf1.get_hcore() + mf1.get_veff(mol, dm) mf1.mo_energy = numpy.einsum('pi,pq,qi->i', mf1.mo_coeff, fockao, mf1.mo_coeff) idx = numpy.hstack([mf1.mo_energy[:no].argsort(), no+mf1.mo_energy[no:].argsort()]) mf1.mo_coeff = mf1.mo_coeff[:,idx] mycc1 = rccsd.RCCSD(mf1) eris1 = mycc1.ao2mo() numpy.random.seed(12) r1 = numpy.random.random((no,nv)) - .9 r2 = numpy.random.random((no,no,nv,nv)) - .9 r2 = r2 + r2.transpose(1,0,3,2) mycc1.t1 = r1*1e-5 mycc1.t2 = r2*1e-5 return mf1, mycc1, eris1 mf1, mycc1, eris1 = make_mycc1() no, nv = mycc1.t1.shape mycci = copy.copy(mycc1) erisi = copy.copy(eris1) erisi.oooo = eris1.oooo + 
numpy.sin(eris1.oooo)*1j erisi.oooo = erisi.oooo + erisi.oooo.conj().transpose(1,0,3,2) erisi.ovoo = eris1.ovoo + numpy.sin(eris1.ovoo)*1j erisi.ovvo = eris1.ovvo + numpy.sin(eris1.ovvo)*1j erisi.oovv = eris1.oovv + numpy.sin(eris1.oovv)*1j erisi.oovv = erisi.oovv + erisi.oovv.conj().transpose(1,0,3,2) erisi.ovov = eris1.ovov + numpy.sin(eris1.ovov)*1j erisi.ovvv = eris1.ovvv + numpy.sin(eris1.ovvv)*1j erisi.vvvv = eris1.vvvv + numpy.sin(eris1.vvvv)*1j erisi.vvvv = erisi.vvvv + erisi.vvvv.conj().transpose(1,0,3,2) mycc2 = ccsd.CCSD(mf) mycc21 = ccsd.CCSD(mf1) mycc2.__dict__.update(mycc.__dict__) mycc21.__dict__.update(mycc1.__dict__) eris21 = mycc21.ao2mo() mycc3 = ccsd.CCSD(mf) mycc31 = ccsd.CCSD(mf1) mycc3.__dict__.update(mycc.__dict__) mycc31.__dict__.update(mycc1.__dict__) mycc3 = mycc3.set(max_memory=0, direct=True) mycc31 = mycc31.set(max_memory=0, direct=True) eris31 = mycc31.ao2mo() def tearDownModule(): global mol, mf, mycc, mf1, eris1, mycc1, mycci, erisi, mycc2, mycc21, eris21, mycc3, mycc31, eris31 del mol, mf, mycc, mf1, eris1, mycc1, mycci, erisi, mycc2, mycc21, eris21, mycc3, mycc31, eris31 class KnownValues(unittest.TestCase): def test_ipccsd(self): eom = mycc.eomip_method() e,v = eom.kernel(nroots=1, left=False, koopmans=False) e = eom.eip self.assertAlmostEqual(e, 0.4335604332073799, 6) e,v = mycc.ipccsd(nroots=3) self.assertAlmostEqual(e[0], 0.4335604332073799, 6) self.assertAlmostEqual(e[1], 0.5187659896045407, 6) self.assertAlmostEqual(e[2], 0.6782876002229172, 6) myeom = eom_rccsd.EOMIP(mycc) lv = myeom.ipccsd(nroots=3, left=True)[1] e = myeom.ipccsd_star_contract(e, v, lv) self.assertAlmostEqual(e[0], 0.43793202122290747, 6) self.assertAlmostEqual(e[1], 0.52287073076243218, 6) self.assertAlmostEqual(e[2], 0.67994597799835099, 6) def test_ipccsd_koopmans(self): e,v = mycc.ipccsd(nroots=3, koopmans=True) self.assertAlmostEqual(e[0], 0.4335604332073799, 6) self.assertAlmostEqual(e[1], 0.5187659896045407, 6) self.assertAlmostEqual(e[2], 
0.6782876002229172, 6) e,v = mycc.ipccsd(nroots=3, guess=v[:3]) self.assertAlmostEqual(e[0], 0.4335604332073799, 6) self.assertAlmostEqual(e[1], 0.5187659896045407, 6) self.assertAlmostEqual(e[2], 0.6782876002229172, 6) def test_ipccsd_partition(self): e,v = mycc.ipccsd(nroots=3, partition='mp') self.assertAlmostEqual(e[0], 0.42728862799879663, 6) self.assertAlmostEqual(e[1], 0.51359478811505332, 6) self.assertAlmostEqual(e[2], 0.67382901297144682, 6) e,v = mycc.ipccsd(nroots=3, partition='full') self.assertAlmostEqual(e[0], 0.42291981842588938, 6) self.assertAlmostEqual(e[1], 0.50992428154417802, 6) self.assertAlmostEqual(e[2], 0.67006510349161119, 6) e,v = mycc.ipccsd(nroots=3, partition='mp', left=True) self.assertAlmostEqual(e[0], 0.42728862799879663, 6) self.assertAlmostEqual(e[1], 0.51359478811505332, 6) self.assertAlmostEqual(e[2], 0.67382901297144682, 6) e,v = mycc.ipccsd(nroots=3, partition='full', left=True) self.assertAlmostEqual(e[0], 0.42291981842588938, 6) self.assertAlmostEqual(e[1], 0.50992428154417802, 6) self.assertAlmostEqual(e[2], 0.67006510349161119, 6) def test_eaccsd(self): eom = mycc.eomea_method() e,v = eom.kernel(nroots=1, left=False, koopmans=False) e = eom.eea self.assertAlmostEqual(e, 0.16737886338859731, 6) e,v = mycc.eaccsd(nroots=3) self.assertAlmostEqual(e[0], 0.16737886338859731, 6) self.assertAlmostEqual(e[1], 0.24027613852009164, 6) self.assertAlmostEqual(e[2], 0.51006797826488071, 6) myeom = eom_rccsd.EOMEA(mycc) lv = myeom.eaccsd(nroots=3, left=True)[1] e = myeom.eaccsd_star_contract(e, v, lv) self.assertAlmostEqual(e[0], 0.16656250872624662, 6) self.assertAlmostEqual(e[1], 0.2394414445283693, 6) self.assertAlmostEqual(e[2], 0.41399434356202935, 6) def test_eaccsd_koopmans(self): e,v = mycc.eaccsd(nroots=3, koopmans=True) self.assertAlmostEqual(e[0], 0.16737886338859731, 6) self.assertAlmostEqual(e[1], 0.24027613852009164, 6) self.assertAlmostEqual(e[2], 0.73443352557582653, 6) e,v = mycc.eaccsd(nroots=3, guess=v[:3]) 
self.assertAlmostEqual(e[0], 0.16737886338859731, 6) self.assertAlmostEqual(e[1], 0.24027613852009164, 6) self.assertAlmostEqual(e[2], 0.73443352557582653, 6) def test_eaccsd_partition(self): e,v = mycc.eaccsd(nroots=3, partition='mp') self.assertAlmostEqual(e[0], 0.16947311575051136, 6) self.assertAlmostEqual(e[1], 0.24234326468848749, 6) self.assertAlmostEqual(e[2], 0.7434661346653969 , 6) e,v = mycc.eaccsd(nroots=3, partition='full') self.assertAlmostEqual(e[0], 0.16418276148493574, 6) self.assertAlmostEqual(e[1], 0.23683978491376495, 6) self.assertAlmostEqual(e[2], 0.55640091560545624, 6) e,v = mycc.eaccsd(nroots=3, partition='mp', left=True) self.assertAlmostEqual(e[0], 0.16947311575051136, 6) self.assertAlmostEqual(e[1], 0.24234326468848749, 6) self.assertAlmostEqual(e[2], 0.7434661346653969 , 6) e,v = mycc.eaccsd(nroots=3, partition='full', left=True) self.assertAlmostEqual(e[0], 0.16418276148493574, 6) self.assertAlmostEqual(e[1], 0.23683978491376495, 6) self.assertAlmostEqual(e[2], 0.55640091560545624, 6) def test_eeccsd(self): eom = mycc.eomee_method() e,v = eom.kernel(nroots=1, koopmans=False) e = eom.eee self.assertAlmostEqual(e, 0.2757159395886167, 6) e,v = mycc.eeccsd(nroots=4) self.assertAlmostEqual(e[0], 0.2757159395886167, 6) self.assertAlmostEqual(e[1], 0.2757159395886167, 6) self.assertAlmostEqual(e[2], 0.2757159395886167, 6) self.assertAlmostEqual(e[3], 0.3005716731825082, 6) def test_eeccsd_koopmans(self): e,v = mycc.eeccsd(nroots=4, koopmans=True) self.assertAlmostEqual(e[0], 0.2757159395886167, 6) self.assertAlmostEqual(e[1], 0.2757159395886167, 6) self.assertAlmostEqual(e[2], 0.2757159395886167, 6) self.assertAlmostEqual(e[3], 0.3005716731825082, 6) e,v = mycc.eeccsd(nroots=4, guess=v[:4]) self.assertAlmostEqual(e[0], 0.2757159395886167, 6) self.assertAlmostEqual(e[1], 0.2757159395886167, 6) self.assertAlmostEqual(e[2], 0.2757159395886167, 6) self.assertAlmostEqual(e[3], 0.3005716731825082, 6) def test_eomee_ccsd_singlet(self): e, v = 
mycc.eomee_ccsd_singlet(nroots=1) self.assertAlmostEqual(e, 0.3005716731825082, 6) def test_eomee_ccsd_triplet(self): e, v = mycc.eomee_ccsd_triplet(nroots=1) self.assertAlmostEqual(e, 0.2757159395886167, 6) def test_eomsf_ccsd(self): e, v = mycc.eomsf_ccsd(nroots=1) self.assertAlmostEqual(e, 0.2757159395886167, 6) def test_vector_to_amplitudes(self): t1, t2 = mycc1.vector_to_amplitudes(mycc1.amplitudes_to_vector(mycc1.t1, mycc1.t2)) self.assertAlmostEqual(abs(mycc1.t1-t1).sum(), 0, 9) self.assertAlmostEqual(abs(mycc1.t2-t2).sum(), 0, 9) def test_eomee_ccsd_matvec_singlet(self): numpy.random.seed(10) r1 = numpy.random.random((no,nv)) - .9 r2 = numpy.random.random((no,no,nv,nv)) - .9 r2 = r2 + r2.transpose(1,0,3,2) myeom = eom_rccsd.EOMEESinglet(mycc1) vec = myeom.amplitudes_to_vector(r1,r2) imds = myeom.make_imds(eris1) vec1 = myeom.matvec(vec, imds) r1, r2 = myeom.vector_to_amplitudes(vec1) self.assertAlmostEqual(lib.finger(r1), -112883.3791497977, 8) self.assertAlmostEqual(lib.finger(r2), -268199.3475813322, 8) def test_eomee_ccsd_matvec_triplet(self): numpy.random.seed(10) r1 = numpy.random.random((no,nv)) - .9 r2 = numpy.random.random((2,no,no,nv,nv)) - .9 r2[0] = r2[0] - r2[0].transpose(0,1,3,2) r2[0] = r2[0] - r2[0].transpose(1,0,2,3) r2[1] = r2[1] - r2[1].transpose(1,0,3,2) myeom = eom_rccsd.EOMEETriplet(mycc1) vec = myeom.amplitudes_to_vector(r1, r2) imds = myeom.make_imds(eris1) vec1 = myeom.matvec(vec, imds) r1, r2 = myeom.vector_to_amplitudes(vec1) self.assertAlmostEqual(lib.finger(r1 ), 3550.5250670914056, 9) self.assertAlmostEqual(lib.finger(r2[0]), -237433.03756895234,8) self.assertAlmostEqual(lib.finger(r2[1]), 127680.0182437716 , 8) def test_eomsf_ccsd_matvec(self): numpy.random.seed(10) r1 = numpy.random.random((no,nv)) - .9 r2 = numpy.random.random((2,no,no,nv,nv)) - .9 myeom = eom_rccsd.EOMEESpinFlip(mycc1) vec = myeom.amplitudes_to_vector(r1,r2) imds = myeom.make_imds(eris1) vec1 = myeom.matvec(vec, imds) r1, r2 = 
myeom.vector_to_amplitudes(vec1) self.assertAlmostEqual(lib.finger(r1 ), -19368.729268465482, 8) self.assertAlmostEqual(lib.finger(r2[0]), 84325.863680611626 , 8) self.assertAlmostEqual(lib.finger(r2[1]), 6715.9574457836134 , 8) def test_eomee_diag(self): vec1S, vec1T, vec2 = eom_rccsd.EOMEE(mycc1).get_diag() self.assertAlmostEqual(lib.finger(vec1S),-4714.9854130015719, 9) self.assertAlmostEqual(lib.finger(vec1T), 2221.3155272953709, 9) self.assertAlmostEqual(lib.finger(vec2) ,-5486.1611871545592, 9) def test_ip_matvec(self): numpy.random.seed(12) r1 = numpy.random.random((no)) - .9 r2 = numpy.random.random((no,no,nv)) - .9 myeom = eom_rccsd.EOMIP(mycc1) vec = myeom.amplitudes_to_vector(r1,r2) r1,r2 = myeom.vector_to_amplitudes(vec) myeom.partition = 'mp' self.assertAlmostEqual(lib.finger(r1), 0.37404344676857076, 12) self.assertAlmostEqual(lib.finger(r2), -1.1568913404570922, 12) imds = myeom.make_imds(eris1) vec1 = myeom.matvec(vec, imds) self.assertAlmostEqual(lib.finger(vec1), -14894.669606811192, 9) self.assertAlmostEqual(lib.finger(myeom.get_diag()), 1182.3095479451745, 9) myeom.partition = 'full' imds = myeom.make_imds(eris1) diag = myeom.get_diag(imds) vec1 = myeom.matvec(vec, imds, diag=diag) self.assertAlmostEqual(lib.finger(vec1), -3795.9122245246967, 9) self.assertAlmostEqual(lib.finger(diag), 1106.260154202434, 9) def test_ea_matvec(self): numpy.random.seed(12) r1 = numpy.random.random((nv)) - .9 r2 = numpy.random.random((no,nv,nv)) - .9 myeom = eom_rccsd.EOMEA(mycc1) vec = myeom.amplitudes_to_vector(r1,r2) r1,r2 = myeom.vector_to_amplitudes(vec) myeom.partition = 'mp' self.assertAlmostEqual(lib.finger(r1), 1.4488291275539353, 12) self.assertAlmostEqual(lib.finger(r2), 0.97080165032287469, 12) imds = myeom.make_imds(eris1) vec1 = myeom.matvec(vec, imds) self.assertAlmostEqual(lib.finger(vec1), -34426.363943760276, 9) self.assertAlmostEqual(lib.finger(myeom.get_diag()), 2724.8239646679217, 9) myeom.partition = 'full' imds = myeom.make_imds(eris1) diag = 
myeom.get_diag(imds) vec1 = myeom.matvec(vec, imds, diag=diag) self.assertAlmostEqual(lib.finger(vec1), -17030.363405297598, 9) self.assertAlmostEqual(lib.finger(diag), 4688.9122122011922, 9) ######################################## # Complex integrals def test_ip_matvec1(self): numpy.random.seed(12) r1 = numpy.random.random((no))-.9 + numpy.random.random((no))*.2j r2 = (numpy.random.random((no,no,nv))-.9 + numpy.random.random((no,no,nv))*.2j) myeom = eom_rccsd.EOMIP(mycci) vec = myeom.amplitudes_to_vector(r1,r2) r1,r2 = myeom.vector_to_amplitudes(vec) eris1 imds = myeom.make_imds(erisi) vec1 = myeom.matvec(vec, imds) self.assertAlmostEqual(lib.finger(vec1), 25176.428829164193-4955.5351324520125j, 9) self.assertAlmostEqual(lib.finger(myeom.get_diag()), 1106.2601542024306, 9) def test_ea_matvec1(self): numpy.random.seed(12) r1 = numpy.random.random((nv))-.9 + numpy.random.random((nv))*.2j r2 = (numpy.random.random((no,nv,nv))-.9 + numpy.random.random((no,nv,nv))*.2j) myeom = eom_rccsd.EOMEA(mycci) vec = myeom.amplitudes_to_vector(r1,r2) r1,r2 = myeom.vector_to_amplitudes(vec) imds = myeom.make_imds(erisi) vec1 = myeom.matvec(vec, imds) self.assertAlmostEqual(lib.finger(vec1), -105083.60825558871+25155.909195554908j, 8) self.assertAlmostEqual(lib.finger(myeom.get_diag()), 4688.9122122011895, 9) ######################################## # With 4-fold symmetry in integrals def test_ipccsd2(self): e,v = mycc2.ipccsd(nroots=1) self.assertAlmostEqual(e, 0.4335604332073799, 6) e,v = mycc2.ipccsd(nroots=3) self.assertAlmostEqual(e[0], 0.4335604332073799, 6) self.assertAlmostEqual(e[1], 0.5187659896045407, 6) self.assertAlmostEqual(e[2], 0.6782876002229172, 6) myeom = eom_rccsd.EOMIP(mycc2) lv = myeom.ipccsd(nroots=3, left=True)[1] e = myeom.ipccsd_star_contract(e, v, lv) self.assertAlmostEqual(e[0], 0.43793202122290747, 6) self.assertAlmostEqual(e[1], 0.52287073076243218, 6) self.assertAlmostEqual(e[2], 0.67994597799835099, 6) def test_ipccsd_koopmans2(self): e,v = 
mycc2.ipccsd(nroots=3, koopmans=True) self.assertAlmostEqual(e[0], 0.4335604332073799, 6) self.assertAlmostEqual(e[1], 0.5187659896045407, 6) self.assertAlmostEqual(e[2], 0.6782876002229172, 6) e,v = mycc2.ipccsd(nroots=3, guess=v[:3]) self.assertAlmostEqual(e[0], 0.4335604332073799, 6) self.assertAlmostEqual(e[1], 0.5187659896045407, 6) self.assertAlmostEqual(e[2], 0.6782876002229172, 6) def test_ipccsd_partition2(self): e,v = mycc2.ipccsd(nroots=3, partition='mp') self.assertAlmostEqual(e[0], 0.42728862799879663, 6) self.assertAlmostEqual(e[1], 0.51359478811505332, 6) self.assertAlmostEqual(e[2], 0.67382901297144682, 6) e,v = mycc2.ipccsd(nroots=3, partition='full') self.assertAlmostEqual(e[0], 0.42291981842588938, 6) self.assertAlmostEqual(e[1], 0.50992428154417802, 6) self.assertAlmostEqual(e[2], 0.67006510349161119, 6) def test_eaccsd2(self): e,v = mycc2.eaccsd(nroots=1) self.assertAlmostEqual(e, 0.16737886338859731, 6) e,v = mycc2.eaccsd(nroots=3) self.assertAlmostEqual(e[0], 0.16737886338859731, 6) self.assertAlmostEqual(e[1], 0.24027613852009164, 6) self.assertAlmostEqual(e[2], 0.51006797826488071, 6) myeom = eom_rccsd.EOMEA(mycc2) lv = myeom.eaccsd(nroots=3, left=True)[1] e = myeom.eaccsd_star_contract(e, v, lv) self.assertAlmostEqual(e[0], 0.16656250872624662, 6) self.assertAlmostEqual(e[1], 0.2394414445283693, 6) self.assertAlmostEqual(e[2], 0.41399434356202935, 6) def test_eaccsd_koopmans2(self): e,v = mycc2.eaccsd(nroots=3, koopmans=True) self.assertAlmostEqual(e[0], 0.16737886338859731, 6) self.assertAlmostEqual(e[1], 0.24027613852009164, 6) self.assertAlmostEqual(e[2], 0.73443352557582653, 6) e,v = mycc2.eaccsd(nroots=3, guess=v[:3]) self.assertAlmostEqual(e[0], 0.16737886338859731, 6) self.assertAlmostEqual(e[1], 0.24027613852009164, 6) self.assertAlmostEqual(e[2], 0.73443352557582653, 6) def test_eaccsd_partition2(self): e,v = mycc2.eaccsd(nroots=3, partition='mp') self.assertAlmostEqual(e[0], 0.16947311575051136, 6) self.assertAlmostEqual(e[1], 
0.24234326468848749, 6) self.assertAlmostEqual(e[2], 0.7434661346653969 , 6) e,v = mycc2.eaccsd(nroots=3, partition='full') self.assertAlmostEqual(e[0], 0.16418276148493574, 6) self.assertAlmostEqual(e[1], 0.23683978491376495, 6) self.assertAlmostEqual(e[2], 0.55640091560545624, 6) def test_eeccsd2(self): e,v = mycc2.eeccsd(nroots=1) self.assertAlmostEqual(e, 0.2757159395886167, 6) e,v = mycc2.eeccsd(nroots=4) self.assertAlmostEqual(e[0], 0.2757159395886167, 6) self.assertAlmostEqual(e[1], 0.2757159395886167, 6) self.assertAlmostEqual(e[2], 0.2757159395886167, 6) self.assertAlmostEqual(e[3], 0.3005716731825082, 6) def test_eeccsd_koopmans2(self): e,v = mycc2.eeccsd(nroots=4, koopmans=True) self.assertAlmostEqual(e[0], 0.2757159395886167, 6) self.assertAlmostEqual(e[1], 0.2757159395886167, 6) self.assertAlmostEqual(e[2], 0.2757159395886167, 6) self.assertAlmostEqual(e[3], 0.3005716731825082, 6) e,v = mycc2.eeccsd(nroots=4, guess=v[:4]) self.assertAlmostEqual(e[0], 0.2757159395886167, 6) self.assertAlmostEqual(e[1], 0.2757159395886167, 6) self.assertAlmostEqual(e[2], 0.2757159395886167, 6) self.assertAlmostEqual(e[3], 0.3005716731825082, 6) def test_eomee_ccsd_matvec_singlet2(self): numpy.random.seed(10) r1 = numpy.random.random((no,nv)) - .9 r2 = numpy.random.random((no,no,nv,nv)) - .9 r2 = r2 + r2.transpose(1,0,3,2) myeom = eom_rccsd.EOMEESinglet(mycc21) vec = myeom.amplitudes_to_vector(r1,r2) imds = myeom.make_imds(eris21) vec1 = myeom.matvec(vec, imds) r1, r2 = myeom.vector_to_amplitudes(vec1) self.assertAlmostEqual(lib.finger(r1), -112883.3791497977, 8) self.assertAlmostEqual(lib.finger(r2), -268199.3475813322, 8) def test_eomee_ccsd_matvec_triplet2(self): numpy.random.seed(10) r1 = numpy.random.random((no,nv)) - .9 r2 = numpy.random.random((2,no,no,nv,nv)) - .9 r2[0] = r2[0] - r2[0].transpose(0,1,3,2) r2[0] = r2[0] - r2[0].transpose(1,0,2,3) r2[1] = r2[1] - r2[1].transpose(1,0,3,2) myeom = eom_rccsd.EOMEETriplet(mycc21) vec = myeom.amplitudes_to_vector(r1, r2) 
imds = myeom.make_imds(eris21) vec1 = myeom.matvec(vec, imds) r1, r2 = myeom.vector_to_amplitudes(vec1) self.assertAlmostEqual(lib.finger(r1 ), 3550.5250670914056, 9) self.assertAlmostEqual(lib.finger(r2[0]), -237433.03756895234,8) self.assertAlmostEqual(lib.finger(r2[1]), 127680.0182437716 , 8) def test_eomsf_ccsd_matvec2(self): numpy.random.seed(10) r1 = numpy.random.random((no,nv)) - .9 r2 = numpy.random.random((2,no,no,nv,nv)) - .9 myeom = eom_rccsd.EOMEESpinFlip(mycc21) vec = myeom.amplitudes_to_vector(r1,r2) imds = myeom.make_imds(eris21) vec1 = myeom.matvec(vec, imds) r1, r2 = myeom.vector_to_amplitudes(vec1) self.assertAlmostEqual(lib.finger(r1 ), -19368.729268465482, 8) self.assertAlmostEqual(lib.finger(r2[0]), 84325.863680611626 , 8) self.assertAlmostEqual(lib.finger(r2[1]), 6715.9574457836134 , 8) def test_eomee_diag2(self): vec1S, vec1T, vec2 = eom_rccsd.EOMEE(mycc21).get_diag() self.assertAlmostEqual(lib.finger(vec1S),-4714.9854130015719, 9) self.assertAlmostEqual(lib.finger(vec1T), 2221.3155272953709, 9) self.assertAlmostEqual(lib.finger(vec2) ,-5486.1611871545592, 9) def test_ip_matvec2(self): numpy.random.seed(12) r1 = numpy.random.random((no)) - .9 r2 = numpy.random.random((no,no,nv)) - .9 myeom = eom_rccsd.EOMIP(mycc21) vec = myeom.amplitudes_to_vector(r1,r2) r1,r2 = myeom.vector_to_amplitudes(vec) myeom.partition = 'mp' self.assertAlmostEqual(lib.finger(r1), 0.37404344676857076, 12) self.assertAlmostEqual(lib.finger(r2), -1.1568913404570922, 12) imds = myeom.make_imds(eris21) vec1 = myeom.matvec(vec, imds) self.assertAlmostEqual(lib.finger(vec1), -14894.669606811192, 9) self.assertAlmostEqual(lib.finger(myeom.get_diag()), 1182.3095479451745, 9) myeom.partition = 'full' imds = myeom.make_imds(eris21) diag = myeom.get_diag(imds) vec1 = myeom.matvec(vec, imds, diag=diag) self.assertAlmostEqual(lib.finger(vec1), -3795.9122245246967, 9) self.assertAlmostEqual(lib.finger(diag), 1106.260154202434, 9) def test_ea_matvec2(self): numpy.random.seed(12) r1 = 
numpy.random.random((nv)) - .9 r2 = numpy.random.random((no,nv,nv)) - .9 myeom = eom_rccsd.EOMEA(mycc21) vec = myeom.amplitudes_to_vector(r1,r2) r1,r2 = myeom.vector_to_amplitudes(vec) myeom.partition = 'mp' self.assertAlmostEqual(lib.finger(r1), 1.4488291275539353, 12) self.assertAlmostEqual(lib.finger(r2), 0.97080165032287469, 12) imds = myeom.make_imds(eris21) vec1 = myeom.matvec(vec, imds) self.assertAlmostEqual(lib.finger(vec1), -34426.363943760276, 9) self.assertAlmostEqual(lib.finger(myeom.get_diag()), 2724.8239646679217, 9) myeom.partition = 'full' imds = myeom.make_imds(eris21) diag = myeom.get_diag(imds) vec1 = myeom.matvec(vec, imds, diag=diag) self.assertAlmostEqual(lib.finger(vec1), -17030.363405297598, 9) self.assertAlmostEqual(lib.finger(diag), 4688.9122122011922, 9) ######################################## # With 4-fold symmetry in integrals # max_memory = 0 # direct = True def test_ipccsd3(self): e,v = mycc3.ipccsd(nroots=1) self.assertAlmostEqual(e, 0.4335604332073799, 6) e,v = mycc3.ipccsd(nroots=3) self.assertAlmostEqual(e[0], 0.4335604332073799, 6) self.assertAlmostEqual(e[1], 0.5187659896045407, 6) self.assertAlmostEqual(e[2], 0.6782876002229172, 6) myeom = eom_rccsd.EOMIP(mycc3) lv = myeom.ipccsd(nroots=3, left=True)[1] e = myeom.ipccsd_star_contract(e, v, lv) self.assertAlmostEqual(e[0], 0.43793202122290747, 6) self.assertAlmostEqual(e[1], 0.52287073076243218, 6) self.assertAlmostEqual(e[2], 0.67994597799835099, 6) def test_ipccsd_koopmans3(self): e,v = mycc3.ipccsd(nroots=3, koopmans=True) self.assertAlmostEqual(e[0], 0.4335604332073799, 6) self.assertAlmostEqual(e[1], 0.5187659896045407, 6) self.assertAlmostEqual(e[2], 0.6782876002229172, 6) e,v = mycc3.ipccsd(nroots=3, guess=v[:3]) self.assertAlmostEqual(e[0], 0.4335604332073799, 6) self.assertAlmostEqual(e[1], 0.5187659896045407, 6) self.assertAlmostEqual(e[2], 0.6782876002229172, 6) def test_ipccsd_partition3(self): e,v = mycc3.ipccsd(nroots=3, partition='mp') 
self.assertAlmostEqual(e[0], 0.42728862799879663, 6) self.assertAlmostEqual(e[1], 0.51359478811505332, 6) self.assertAlmostEqual(e[2], 0.67382901297144682, 6) e,v = mycc3.ipccsd(nroots=3, partition='full') self.assertAlmostEqual(e[0], 0.42291981842588938, 6) self.assertAlmostEqual(e[1], 0.50992428154417802, 6) self.assertAlmostEqual(e[2], 0.67006510349161119, 6) # def test_eaccsd3(self): # e,v = mycc3.eaccsd(nroots=1) # self.assertAlmostEqual(e, 0.16737886338859731, 6) # # e,v = mycc3.eaccsd(nroots=3) # self.assertAlmostEqual(e[0], 0.16737886338859731, 6) # self.assertAlmostEqual(e[1], 0.24027613852009164, 6) # self.assertAlmostEqual(e[2], 0.51006797826488071, 6) # # myeom = eom_rccsd.EOMEA(mycc3) # lv = myeom.eaccsd(nroots=3, left=True)[1] # e = myeom.eaccsd_star_contract(e, v, lv) # self.assertAlmostEqual(e[0], 0.16656250872624662, 6) # self.assertAlmostEqual(e[1], 0.2394414445283693, 6) # self.assertAlmostEqual(e[2], 0.41399434356202935, 6) # # def test_eaccsd_koopmans3(self): # e,v = mycc3.eaccsd(nroots=3, koopmans=True) # self.assertAlmostEqual(e[0], 0.16737886338859731, 6) # self.assertAlmostEqual(e[1], 0.24027613852009164, 6) # self.assertAlmostEqual(e[2], 0.73443352557582653, 6) # # e,v = mycc3.eaccsd(nroots=3, guess=v[:3]) # self.assertAlmostEqual(e[0], 0.16737886338859731, 6) # self.assertAlmostEqual(e[1], 0.24027613852009164, 6) # self.assertAlmostEqual(e[2], 0.73443352557582653, 6) # # def test_eaccsd_partition3(self): # e,v = mycc3.eaccsd(nroots=3, partition='mp') # self.assertAlmostEqual(e[0], 0.16947311575051136, 6) # self.assertAlmostEqual(e[1], 0.24234326468848749, 6) # self.assertAlmostEqual(e[2], 0.7434661346653969 , 6) # # e,v = mycc3.eaccsd(nroots=3, partition='full') # self.assertAlmostEqual(e[0], 0.16418276148493574, 6) # self.assertAlmostEqual(e[1], 0.23683978491376495, 6) # self.assertAlmostEqual(e[2], 0.55640091560545624, 6) def test_eeccsd3(self): e,v = mycc3.eeccsd(nroots=1) self.assertAlmostEqual(e, 0.2757159395886167, 6) e,v = 
mycc3.eeccsd(nroots=4) self.assertAlmostEqual(e[0], 0.2757159395886167, 6) self.assertAlmostEqual(e[1], 0.2757159395886167, 6) self.assertAlmostEqual(e[2], 0.2757159395886167, 6) self.assertAlmostEqual(e[3], 0.3005716731825082, 6) def test_eeccsd_koopmans3(self): e,v = mycc3.eeccsd(nroots=4, koopmans=True) self.assertAlmostEqual(e[0], 0.2757159395886167, 6) self.assertAlmostEqual(e[1], 0.2757159395886167, 6) self.assertAlmostEqual(e[2], 0.2757159395886167, 6) self.assertAlmostEqual(e[3], 0.3005716731825082, 6) e,v = mycc3.eeccsd(nroots=4, guess=v[:4]) self.assertAlmostEqual(e[0], 0.2757159395886167, 6) self.assertAlmostEqual(e[1], 0.2757159395886167, 6) self.assertAlmostEqual(e[2], 0.2757159395886167, 6) self.assertAlmostEqual(e[3], 0.3005716731825082, 6) def test_eomee_ccsd_matvec_singlet3(self): numpy.random.seed(10) r1 = numpy.random.random((no,nv)) - .9 r2 = numpy.random.random((no,no,nv,nv)) - .9 r2 = r2 + r2.transpose(1,0,3,2) myeom = eom_rccsd.EOMEESinglet(mycc31) vec = myeom.amplitudes_to_vector(r1,r2) imds = myeom.make_imds(eris31) vec1 = myeom.matvec(vec, imds) r1, r2 = myeom.vector_to_amplitudes(vec1) self.assertAlmostEqual(lib.finger(r1), -112883.3791497977, 8) self.assertAlmostEqual(lib.finger(r2), -268199.3475813322, 8) def test_eomee_ccsd_matvec_triplet3(self): numpy.random.seed(10) r1 = numpy.random.random((no,nv)) - .9 r2 = numpy.random.random((2,no,no,nv,nv)) - .9 r2[0] = r2[0] - r2[0].transpose(0,1,3,2) r2[0] = r2[0] - r2[0].transpose(1,0,2,3) r2[1] = r2[1] - r2[1].transpose(1,0,3,2) myeom = eom_rccsd.EOMEETriplet(mycc31) vec = myeom.amplitudes_to_vector(r1, r2) imds = myeom.make_imds(eris31) vec1 = myeom.matvec(vec, imds) r1, r2 = myeom.vector_to_amplitudes(vec1) self.assertAlmostEqual(lib.finger(r1 ), 3550.5250670914056, 9) self.assertAlmostEqual(lib.finger(r2[0]), -237433.03756895234,8) self.assertAlmostEqual(lib.finger(r2[1]), 127680.0182437716 , 8) def test_eomsf_ccsd_matvec3(self): numpy.random.seed(10) r1 = numpy.random.random((no,nv)) - 
.9 r2 = numpy.random.random((2,no,no,nv,nv)) - .9 myeom = eom_rccsd.EOMEESpinFlip(mycc31) vec = myeom.amplitudes_to_vector(r1,r2) imds = myeom.make_imds(eris31) vec1 = myeom.matvec(vec, imds) r1, r2 = myeom.vector_to_amplitudes(vec1) self.assertAlmostEqual(lib.finger(r1 ), -19368.729268465482, 8) self.assertAlmostEqual(lib.finger(r2[0]), 84325.863680611626 , 8) self.assertAlmostEqual(lib.finger(r2[1]), 6715.9574457836134 , 8) def test_eomee_diag3(self): vec1S, vec1T, vec2 = eom_rccsd.EOMEE(mycc31).get_diag() self.assertAlmostEqual(lib.finger(vec1S),-2881.6804563818432, 9) self.assertAlmostEqual(lib.finger(vec1T), 2039.7385969969259, 9) self.assertAlmostEqual(lib.finger(vec2) ,-4271.6230465236358, 9) def test_ip_matvec3(self): numpy.random.seed(12) r1 = numpy.random.random((no)) - .9 r2 = numpy.random.random((no,no,nv)) - .9 myeom = eom_rccsd.EOMIP(mycc31) vec = myeom.amplitudes_to_vector(r1,r2) r1,r2 = myeom.vector_to_amplitudes(vec) myeom.partition = 'mp' self.assertAlmostEqual(lib.finger(r1), 0.37404344676857076, 12) self.assertAlmostEqual(lib.finger(r2), -1.1568913404570922, 12) imds = myeom.make_imds(eris31) vec1 = myeom.matvec(vec, imds) self.assertAlmostEqual(lib.finger(vec1), -14894.669606811192, 9) self.assertAlmostEqual(lib.finger(myeom.get_diag()), 1182.3095479451745, 9) myeom.partition = 'full' imds = myeom.make_imds(eris31) diag = myeom.get_diag(imds) vec1 = myeom.matvec(vec, imds, diag=diag) self.assertAlmostEqual(lib.finger(vec1), -3795.9122245246967, 9) self.assertAlmostEqual(lib.finger(diag), 1106.260154202434, 9) def test_sort_left_right_eigensystem(self): myeom = eom_rccsd.EOMIP(mycc) right_evecs = [numpy.ones(10)] * 4 left_evecs = [numpy.ones(10)] * 5 right_evecs = [x*i for i, x in enumerate(right_evecs)] left_evecs = [x*i for i, x in enumerate(left_evecs)] revals, revecs, levecs = eom_rccsd._sort_left_right_eigensystem( myeom, [True, False, True, True], [-1.1, 0, 1.1, 2.2], right_evecs, [True, True, True, False, True], [-2.2, -1.1, 0, 1.1, 2.2], 
left_evecs) self.assertEqual(revals[0], -1.1) self.assertEqual(revals[1], 2.2) self.assertEqual(revecs[0][0], 0) self.assertEqual(revecs[1][0], 3) self.assertEqual(levecs[0][0], 1) self.assertEqual(levecs[1][0], 4) revals, revecs, levecs = eom_rccsd._sort_left_right_eigensystem( myeom, [True, False, True, True], [-1.1, 0, 1.1, 2.2], right_evecs, [True, True, False, True, True], [-2.2, -1.1, 0, 1.1, 2.2], left_evecs) self.assertEqual(revals[0], -1.1) self.assertEqual(revals[1], 1.1) self.assertEqual(revals[2], 2.2) self.assertEqual(revecs[0][0], 0) self.assertEqual(revecs[1][0], 2) self.assertEqual(revecs[2][0], 3) self.assertEqual(levecs[0][0], 1) self.assertEqual(levecs[1][0], 3) self.assertEqual(levecs[2][0], 4) # def test_ea_matvec3(self): # numpy.random.seed(12) # r1 = numpy.random.random((nv)) - .9 # r2 = numpy.random.random((no,nv,nv)) - .9 # myeom = eom_rccsd.EOMEA(mycc31) # vec = myeom.amplitudes_to_vector(r1,r2) # r1,r2 = myeom.vector_to_amplitudes(vec) # myeom.partition = 'mp' # self.assertAlmostEqual(lib.finger(r1), 1.4488291275539353, 12) # self.assertAlmostEqual(lib.finger(r2), 0.97080165032287469, 12) # imds = myeom.make_imds(eris31) # vec1 = myeom.matvec(vec, imds) # self.assertAlmostEqual(lib.finger(vec1), -34426.363943760276, 9) # self.assertAlmostEqual(lib.finger(myeom.get_diag()), 2724.8239646679217, 9) # # myeom.partition = 'full' # imds = myeom.make_imds(eris31) # diag = myeom.get_diag(imds) # vec1 = myeom.matvec(vec, imds, diag=diag) # self.assertAlmostEqual(lib.finger(vec1), -17030.363405297598, 9) # self.assertAlmostEqual(lib.finger(diag), 4688.9122122011922, 9) def test_t3p2_intermediates_complex(self): '''Although this has not been tested strictly for complex values, it was written to be correct for complex values and differences in the complex values between versions should be taken into account and corrected.''' myt1 = mycc1.t1 + 1j * numpy.sin(mycc1.t1) * mycc1.t1 myt2 = mycc1.t2 + 1j * numpy.sin(mycc1.t2) * mycc1.t2 myt2 = myt2 + 
myt2.transpose(1,0,3,2) e, pt1, pt2, Wmcik, Wacek = rintermediates.get_t3p2_imds_slow(mycc1, myt1, myt2, eris=erisi) self.assertAlmostEqual(lib.finger(e), 23223.465490572264, 6) self.assertAlmostEqual(lib.finger(pt1), (-5.2202836452466705-0.09570164571057749j), 6) self.assertAlmostEqual(lib.finger(pt2), (46.188012063609506-1.303867687778909j), 6) self.assertAlmostEqual(lib.finger(Wmcik), (-18.438930654297778+1.5734161307568773j), 6) self.assertAlmostEqual(lib.finger(Wacek), (-7.187576764072701+0.7399185332889747j), 6) def test_t3p2_intermediates_real(self): myt1 = mycc1.t1.copy() myt2 = mycc1.t2.copy() myt2 = myt2 + myt2.transpose(1,0,3,2) e, pt1, pt2, Wmcik, Wacek = rintermediates.get_t3p2_imds_slow(mycc1, myt1, myt2) self.assertAlmostEqual(lib.finger(e), 23230.479350851536, 6) self.assertAlmostEqual(lib.finger(pt1), -5.218888542335442, 6) self.assertAlmostEqual(lib.finger(pt2), 46.19512409958347, 6) self.assertAlmostEqual(lib.finger(Wmcik), -18.47928005593598, 6) self.assertAlmostEqual(lib.finger(Wacek), -7.101360230151883, 6) def test_t3p2_intermediates_against_so(self): from pyscf.cc.addons import convert_to_gccsd myt1 = mycc1.t1.copy() myt2 = mycc1.t2.copy() e, pt1, pt2, Wmcik, Wacek = rintermediates.get_t3p2_imds_slow(mycc1, myt1, myt2) mygcc = convert_to_gccsd(mycc1) mygt1 = mygcc.t1.copy() mygt2 = mygcc.t2.copy() ge, gpt1, gpt2, gWmcik, gWacek = gintermediates.get_t3p2_imds_slow(mygcc, mygt1, mygt2) self.assertAlmostEqual(lib.finger(pt1), -2.6094405706617727, 6) self.assertAlmostEqual(lib.finger(pt2), 23.097562049844235, 6) self.assertAlmostEqual(lib.finger(pt1), lib.finger(gpt1[::2,::2]), 6) self.assertAlmostEqual(lib.finger(pt2), lib.finger(gpt2[::2,1::2,::2,1::2]), 6) def test_h2o_star(self): mol_h2o = gto.Mole() mol_h2o.atom = [ [8, [0.000000000000000, -0.000000000000000, -0.124143731294022]], [1, [0.000000000000000, -1.430522735894536, 0.985125550040314]], [1, [0.000000000000000, 1.430522735894536, 0.985125550040314]]] mol_h2o.unit = 'B' mol_h2o.basis 
= {'H' : [[0, [5.4471780, 0.156285], [0.8245472, 0.904691]], [0, [0.1831916, 1.0]]], 'O' : '3-21G'} mol_h2o.verbose = 7 mol_h2o.output = '/dev/null' mol_h2o.build() mol.conv_tol = 1e-12 mf_h2o = scf.RHF(mol_h2o) mf_h2o.conv_tol_grad = 1e-12 mf_h2o.kernel() mycc_h2o = cc.RCCSD(mf_h2o).run() mycc_h2o.conv_tol_normt = 1e-12 mycc_h2o.conv_tol = 1e-12 mycc_h2o.kernel() myeom = eom_rccsd.EOMIP(mycc_h2o) e = myeom.ipccsd_star(nroots=3) self.assertAlmostEqual(e[0], 0.410661965883, 6) myeom = eom_rccsd.EOMIP_Ta(mycc_h2o) e = myeom.ipccsd_star(nroots=3) self.assertAlmostEqual(e[0], 0.411695647736, 6) myeom = eom_rccsd.EOMEA(mycc_h2o) e = myeom.eaccsd_star(nroots=3) self.assertAlmostEqual(e[0], 0.250589854185, 6) myeom = eom_rccsd.EOMEA_Ta(mycc_h2o) e = myeom.eaccsd_star(nroots=3) self.assertAlmostEqual(e[0], 0.250720295150, 6) if __name__ == "__main__": print("Tests for EOM RCCSD") unittest.main()
apache-2.0
ahu-odoo/odoo
addons/account/company.py
384
2814
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class res_company(osv.osv): _inherit = "res.company" _columns = { 'expects_chart_of_accounts': fields.boolean('Expects a Chart of Accounts'), 'tax_calculation_rounding_method': fields.selection([ ('round_per_line', 'Round per Line'), ('round_globally', 'Round Globally'), ], 'Tax Calculation Rounding Method', help="If you select 'Round per Line' : for each tax, the tax amount will first be computed and rounded for each PO/SO/invoice line and then these rounded amounts will be summed, leading to the total amount for that tax. If you select 'Round Globally': for each tax, the tax amount will be computed for each PO/SO/invoice line, then these amounts will be summed and eventually this total tax amount will be rounded. 
If you sell with tax included, you should choose 'Round per line' because you certainly want the sum of your tax-included line subtotals to be equal to the total amount with taxes."), 'paypal_account': fields.char("Paypal Account", size=128, help="Paypal username (usually email) for receiving online payments."), 'overdue_msg': fields.text('Overdue Payments Message', translate=True), } _defaults = { 'expects_chart_of_accounts': True, 'tax_calculation_rounding_method': 'round_per_line', 'overdue_msg': '''Dear Sir/Madam, Our records indicate that some payments on your account are still due. Please find details below. If the amount has already been paid, please disregard this notice. Otherwise, please forward us the total amount stated below. If you have any queries regarding your account, Please contact us. Thank you in advance for your cooperation. Best Regards,''' } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
SalesforceFoundation/CumulusCI
cumulusci/tasks/salesforce/Deploy.py
1
5848
import pathlib
from typing import Optional

from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.core.utils import process_bool_arg, process_list_arg
from cumulusci.salesforce_api.metadata import ApiDeploy
from cumulusci.salesforce_api.package_zip import MetadataPackageZipBuilder
from cumulusci.tasks.salesforce.BaseSalesforceMetadataApiTask import (
    BaseSalesforceMetadataApiTask,
)


class Deploy(BaseSalesforceMetadataApiTask):
    """Deploy a directory of metadata source to the target org via the Metadata API.

    Builds a deployment zip from ``path`` (with namespace token injection /
    stripping and meta.xml cleaning) and submits it through :class:`ApiDeploy`.
    """

    api_class = ApiDeploy
    task_options = {
        "path": {
            "description": "The path to the metadata source to be deployed",
            "required": True,
        },
        "unmanaged": {
            "description": "If True, changes namespace_inject to replace tokens with a blank string"
        },
        "namespace_inject": {
            "description": "If set, the namespace tokens in files and filenames are replaced with the namespace's prefix"
        },
        "namespace_strip": {
            "description": "If set, all namespace prefixes for the namespace specified are stripped from files and filenames"
        },
        "check_only": {
            "description": "If True, performs a test deployment (validation) of components without saving the components in the target org"
        },
        "test_level": {
            "description": "Specifies which tests are run as part of a deployment. Valid values: NoTestRun, RunLocalTests, RunAllTestsInOrg, RunSpecifiedTests."
        },
        "specified_tests": {
            "description": "Comma-separated list of test classes to run upon deployment. Applies only with test_level set to RunSpecifiedTests."
        },
        "static_resource_path": {
            "description": "The path where decompressed static resources are stored. Any subdirectories found will be zipped and added to the staticresources directory of the build."
        },
        "namespaced_org": {
            "description": "If True, the tokens %%%NAMESPACED_ORG%%% and ___NAMESPACED_ORG___ will get replaced with the namespace.  The default is false causing those tokens to get stripped and replaced with an empty string.  Set this if deploying to a namespaced scratch org or packaging org."
        },
        "clean_meta_xml": {
            "description": "Defaults to True which strips the <packageVersions/> element from all meta.xml files.  The packageVersion element gets added automatically by the target org and is set to whatever version is installed in the org.  To disable this, set this option to False"
        },
    }

    namespaces = {"sf": "http://soap.sforce.com/2006/04/metadata"}

    def _init_options(self, kwargs):
        """Validate and normalize test-level and namespace options.

        Raises:
            TaskOptionsError: if ``test_level`` is not a valid value, or if
                ``specified_tests`` and ``test_level=RunSpecifiedTests`` are
                not used together.
        """
        super(Deploy, self)._init_options(kwargs)
        self.check_only = process_bool_arg(self.options.get("check_only", False))
        self.test_level = self.options.get("test_level")
        if self.test_level and self.test_level not in [
            "NoTestRun",
            "RunLocalTests",
            "RunAllTestsInOrg",
            "RunSpecifiedTests",
        ]:
            raise TaskOptionsError(
                f"Specified test run level {self.test_level} is not valid."
            )

        self.specified_tests = process_list_arg(self.options.get("specified_tests", []))

        # specified_tests and RunSpecifiedTests are only meaningful together;
        # either alone is a configuration error.
        if bool(self.specified_tests) != (self.test_level == "RunSpecifiedTests"):
            raise TaskOptionsError(
                "The specified_tests option and test_level RunSpecifiedTests must be used together."
            )

        # Default the injection namespace to the project's package namespace.
        self.options["namespace_inject"] = (
            self.options.get("namespace_inject")
            or self.project_config.project__package__namespace
        )

    def _get_api(self, path=None):
        """Build the deployment API object, or return None for an empty package."""
        if not path:
            path = self.options.get("path")

        package_zip = self._get_package_zip(path)
        if package_zip is not None:
            self.logger.info("Payload size: {} bytes".format(len(package_zip)))
        else:
            self.logger.warning("Deployment package is empty; skipping deployment.")
            return

        return self.api_class(
            self,
            package_zip,
            purge_on_delete=False,
            check_only=self.check_only,
            test_level=self.test_level,
            run_tests=self.specified_tests,
        )

    def _has_namespaced_package(self, ns: Optional[str]) -> bool:
        """Return True if the namespace's managed package is installed in the org.

        An explicit ``unmanaged`` option overrides the org-based detection.
        """
        if "unmanaged" in self.options:
            return not process_bool_arg(self.options.get("unmanaged", True))
        return bool(ns) and ns in self.org_config.installed_packages

    def _is_namespaced_org(self, ns: Optional[str]) -> bool:
        """Return True if the target org itself uses this namespace.

        An explicit ``namespaced_org`` option overrides the org-based detection.
        """
        if "namespaced_org" in self.options:
            return process_bool_arg(self.options.get("namespaced_org", False))
        return bool(ns) and ns == self.org_config.namespace

    def _get_package_zip(self, path):
        """Build the base64-encoded package zip for *path*, or None if empty/missing."""
        # BUG FIX: was `self.__class__.name` — classes have no `name`
        # attribute, so a failing assert raised AttributeError instead of
        # showing this message. `__name__` is the class-name attribute.
        assert path, f"Path should be specified for {self.__class__.__name__}"
        if not pathlib.Path(path).exists():
            self.logger.warning(f"{path} not found.")
            return

        namespace = self.options["namespace_inject"]
        options = {
            **self.options,
            "clean_meta_xml": process_bool_arg(
                self.options.get("clean_meta_xml", True)
            ),
            "namespace_inject": namespace,
            "unmanaged": not self._has_namespaced_package(namespace),
            "namespaced_org": self._is_namespaced_org(namespace),
        }

        package_zip = MetadataPackageZipBuilder(
            path=path, options=options, logger=self.logger
        )
        # An empty zip means nothing to deploy; signal the caller to skip.
        if not package_zip.zf.namelist():
            return
        return package_zip.as_base64()

    def freeze(self, step):
        """Freeze steps for MetaDeploy, recategorizing generic steps as metadata."""
        steps = super(Deploy, self).freeze(step)
        for step in steps:
            if step["kind"] == "other":
                step["kind"] = "metadata"
        return steps
bsd-3-clause
mapr/sahara
sahara/cli/sahara_api.py
2
1907
#!/usr/bin/env python # Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.utils import patches patches.patch_all() import os import sys import eventlet from eventlet import wsgi from oslo import i18n # If ../sahara/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'sahara', '__init__.py')): sys.path.insert(0, possible_topdir) # NOTE(slukjanov): i18n.enable_lazy() must be called before # sahara.utils.i18n._() is called to ensure it has the desired # lazy lookup behavior. i18n.enable_lazy() import sahara.main as server from sahara.openstack.common import log as logging LOG = logging.getLogger(__name__) def main(): server.setup_common(possible_topdir, 'API') app = server.make_app() server.setup_sahara_api('distributed') from oslo.config import cfg wsgi.server(eventlet.listen((cfg.CONF.host, cfg.CONF.port), backlog=500), app, log=logging.WritableLogger(LOG), debug=False)
apache-2.0
KredekPth/Kurs_django
website/urls.py
1
1151
"""website URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import include, url from django.contrib import admin from shelf.views import AuthorListView, AuthorDetailView from shelf.views import BookListView from contact.views import MessageAddView from shelf.views import MainPageView app_name = 'shelf' urlpatterns = [ url(r'^admin/', include(admin.site.urls)), url(r'^shelf/',include('shelf.urls',namespace = 'shelf')), url(r'^contact/$', MessageAddView.as_view()), url(r'^$',MainPageView.as_view(),name = 'main-page'), ]
mit
lukeiwanski/tensorflow-opencl
tensorflow/python/training/localhost_cluster_performance_test.py
50
5457
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for creating RPC clusters on localhost."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time

import numpy as np
import portpicker

from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import device_setter
from tensorflow.python.training import server_lib


def create_local_cluster(num_workers, num_ps, protocol="grpc"):
  """Create local GRPC servers and return their servers.

  Picks unused localhost ports for every worker and parameter-server task,
  builds a ClusterSpec from them, and starts one in-process Server per task.

  Returns:
    A (workers, ps_servers) pair of lists of started Server objects.
  """
  worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
  ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
  cluster_dict = {
      "worker": ["localhost:%s" % port for port in worker_ports],
      "ps": ["localhost:%s" % port for port in ps_ports]
  }
  cs = server_lib.ClusterSpec(cluster_dict)

  workers = [
      server_lib.Server(
          cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
      for ix in range(num_workers)
  ]
  ps_servers = [
      server_lib.Server(
          cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
      for ix in range(num_ps)
  ]

  return workers, ps_servers


class CreateLocalClusterTest(test.TestCase):
  """Functional test: variables placed on ps tasks are visible cross-worker."""

  def testCreateLocalCluster(self):
    workers, _ = create_local_cluster(num_workers=2, num_ps=2)
    worker_sessions = [session_lib.Session(w.target) for w in workers]
    with ops.device("/job:ps/task:0"):
      var0 = variables.Variable(0.0)
    with ops.device("/job:ps/task:1"):
      var1 = variables.Variable(1.0)
    worker_sessions[0].run([var0.initializer, var1.initializer])
    with ops.device("/job:ps/task:0"):
      var2 = variables.Variable(2.0)
    with ops.device("/job:ps/task:1"):
      var3 = variables.Variable(3.0)
    worker_sessions[1].run([var2.initializer, var3.initializer])

    # Read values back in the opposite session — proves the variables live on
    # the shared ps tasks, not in a single worker's session.
    self.assertAllEqual(0.0, var0.eval(session=worker_sessions[1]))
    self.assertAllEqual(1.0, var1.eval(session=worker_sessions[1]))
    self.assertAllEqual(2.0, var2.eval(session=worker_sessions[0]))
    self.assertAllEqual(3.0, var3.eval(session=worker_sessions[0]))


class CreateLocalClusterBenchmark(test.Benchmark):
  """Benchmark: wall time to stand up a 1-worker/10-ps local cluster."""

  def benchmarkCreateLocalCluster(self):
    deltas = []
    iters = 5
    for _ in range(iters):
      start_time = time.time()
      create_local_cluster(num_workers=1, num_ps=10)
      end_time = time.time()
      deltas.append(end_time - start_time)

    # Median is reported to damp outliers from port allocation / startup.
    median_deltas = np.median(deltas)
    print("\n\nbenchmark_create_local_cluster_1_worker_10_ps.  "
          "iterations: %d, median wall time: %g\n\n" % (iters, median_deltas))
    self.report_benchmark(
        iters=iters,
        wall_time=median_deltas,
        name="benchmark_create_local_cluster_1_worker_10_ps")


class PartitionedVariablesBenchmark(test.Benchmark):
  """Benchmark: reading heavily partitioned variables from many ps tasks."""

  def benchmark_create_1000_partitions_with_100_parameter_servers(self):
    workers, _ = create_local_cluster(num_workers=1, num_ps=100)
    worker_sessions = [session_lib.Session(w.target) for w in workers]
    worker = worker_sessions[0]
    partition_sizes = (1, 512, 1024 * 32, 1024 * 128)

    partitioned = []

    for partition_size in partition_sizes:
      # max_shard_bytes is 4, shape is 1000*partition_size float32s which should
      # partition into 1000 shards, each containing partition_size float32s.
      print("Building partitioned variable with %d floats per partition" %
            partition_size)
      with ops.device(device_setter.replica_device_setter(ps_tasks=100)):
        partitioned_ix = variable_scope.get_variable(
            "partitioned_%d" % partition_size,
            shape=[1000 * partition_size],
            dtype=dtypes.float32,
            # Each partition to have exactly N float32s
            partitioner=partitioned_variables.variable_axis_size_partitioner(
                max_shard_bytes=4 * partition_size))
        # Concatenates along axis 0
        partitioned.append(ops.convert_to_tensor(partitioned_ix))

    variables.global_variables_initializer().run(session=worker)

    for ix, partition_size in enumerate(partition_sizes):
      print("Running benchmark having partitions with %d floats" %
            partition_size)
      self.run_op_benchmark(
          worker,
          partitioned[ix],
          name=("read_concat_1000_partitions_from_"
                "100_parameter_servers_partsize_%d_floats" % partition_size))


if __name__ == "__main__":
  test.main()
apache-2.0
aisthesis/opttrack
opttrack/lib/ui/edit_handlers.py
1
10897
""" Copyright (c) 2015 Marshall Farrier license http://opensource.org/licenses/MIT lib/ui/handlers.py Handlers for edit menu """ from bson.codec_options import CodecOptions import datetime as dt from functools import partial import json from pymongo.errors import BulkWriteError from ..dbschema import SPREADS from ..dbtools import delete_many, find_job, getcoll, insert_many from ..dbwrapper import job from ..spreads.optspread import SPREAD_TYPES from ..spreads.optspread_factory import OptSpreadFactory from .spread_ui import SpreadUi from .utils import confirm class EditHandlers(object): def __init__(self, logger, tz): self.logger = logger self.tz = tz def add_obs(self, spread_type): spread = SpreadUi().get(spread_type) if not spread: print('\nAborting: spread NOT saved!') return True job(self.logger, partial(_saveentries, (vars(spread),), 'observe')) return True def del_obs(self, spread_type): underlying = input('Underlying: ').strip().upper() wrapped_spreads = self._get_observed({'Underlying': underlying, 'Spread_Type': spread_type}) if len(wrapped_spreads) == 0: print('\nNo {} spreads found for {}'.format(SPREAD_TYPES[spread_type], underlying)) else: self._del_obs(wrapped_spreads) return True def show_obs(self, spread_type): wrapped_spreads = self._get_observed({'Spread_Type': spread_type}) if not len(wrapped_spreads): print('\nNo {} spreads found.'.format(SPREAD_TYPES[spread_type])) for item in wrapped_spreads: print('') item['spread'].show(False, False, False) return True def add_find(self, spread_type): if _is_fromfile(): fname = input('Enter file name: ').strip() equities = _eqs_fromfile(fname) else: equities = _eqs_fromblob(input('Underlying equities (GOOGL,TSLA,FB): ')) print('Include in future scans:\n') for eq in equities: print("'{}'".format(eq)) choice = input('\nOK to proceed (y/n)? 
').lower() if choice == 'y': entries = _get_find_entries(equities, spread_type) job(self.logger, partial(_saveentries, entries, 'find')) else: print('Aborting: equities NOT saved!') return True def del_find(self, spread_type): equities = _eqs_fromblob(input('Underlying equities (GOOGL,TSLA,FB): ')) print('Remove from future scans:\n') for eq in equities: print("'{}'".format(eq)) choice = input('\nOK to proceed (y/n)? ').lower() if choice == 'y': entries = _get_find_entries(equities, spread_type) job(self.logger, partial(_delentries, entries, 'find')) else: print('Aborting: equities NOT deleted!') return True def show_find(self): for spread in SPREADS: cursor = job(self.logger, partial(find_job, 'find', {'spread': spread['key']})) equities = sorted([item['eq'] for item in cursor]) print('\n{}:'.format(spread['desc'])) if len(equities) > 0: print('{} equities are being scanned'.format(len(equities))) for equity in equities: print("'{}'".format(equity)) else: print('No equities are being scanned') return True def track_single(self): entry = self._get_track_entry() self._confirmsave((entry,)) return True def track_dgb(self): print('\nTrack diagonal butterfly:') underlying = input('Underlying equity: ').strip().upper() straddleexp = self._getexpdt(input('Straddle expiration (yyyy-mm-dd): ')) straddlestrike = float(input('Straddle strike: ')) farexp = self._getexpdt(input('Far expiration (yyyy-mm-dd): ')) distance = float(input('Distance between strikes: ')) entries = _get_dgbentries(underlying, straddleexp, straddlestrike, farexp, distance) self._confirmsave(entries) return True def delete_tracked(self): entry = self._get_track_entry() self._confirmdelete(entry) return True def show_tracked(self): underlying = input('Underlying equity: ').strip().upper() job(self.logger, partial(_show_tracked, self.tz, underlying)) return True def _del_obs(self, wrapped_spreads): if len(wrapped_spreads) == 1: self._del_obs_unique(wrapped_spreads[0]) else: 
self._del_obs_select(wrapped_spreads) def _del_obs_unique(self, wrapped_spread): print('\nStop observing the following spread:\n') wrapped_spread['spread'].show(False, False, False) print('') if confirm(): job(self.logger, partial(_delentries, ({'_id': wrapped_spread['_id']},), 'observe')) else: print('\nAborting: spread NOT deleted!') def _del_obs_select(self, wrapped_spreads): print('Multiple {} spreads found for {}.'.format(SPREAD_TYPES[wrapped_spreads[0]['spread'].Spread_Type], wrapped_spreads[0]['spread'].Underlying)) print('Select spread to delete:') for i in range(len(wrapped_spreads)): print('\n({})'.format(i + 1)) wrapped_spreads[i]['spread'].show(False, False, False) choice = int(input('\nEnter number for spread to delete: ')) if not 0 < choice <= len(wrapped_spreads): print('\nInvalid selection!') return self._del_obs_unique(wrapped_spreads[choice - 1]) def _get_track_entry(self): entry = {} entry['Underlying'] = input('Underlying equity: ').strip().upper() entry['Opt_Type'] = _getopttype(input('Option type (c[all] or p[ut]): ')) entry['Expiry'] = self._getexpdt(input('Expiration (yyyy-mm-dd): ')) entry['Strike'] = float(input('Strike: ')) return entry def _confirmsave(self, entries): print('\nSaving the following options:') _show_track_entries(entries) choice = input('\nOK to proceed (y/n)? ').lower() if choice == 'y': job(self.logger, partial(_saveentries, entries, 'track')) else: print('Aborting: option(s) NOT saved!') def _confirmdelete(self, entry): print('\nDeleting the following option:') _show_track_entries((entry,)) choice = input('\nStop tracking this option (y/n)? 
').lower() if choice == 'y': job(self.logger, partial(_delentries, (entry,), 'track')) else: print('Aborting: option NOT deleted!') def _get_observed(self, qry): spread_factory = OptSpreadFactory(self.tz) cursor = job(self.logger, partial(find_job, 'observe', qry, codec_options=CodecOptions(tz_aware=True))) wrapped_spreads = [] for item in cursor: wrapped_spreads.append({'spread': spread_factory.make(item), '_id': item['_id']}) return wrapped_spreads def _getexpdt(self, expirytxt): # on 2016-02-19 expired options were unavailable on yahoo by 7:30 pm EST return self.tz.localize(dt.datetime.strptime(expirytxt, '%Y-%m-%d')).replace(hour=19) def _getopttype(rawtxt): if rawtxt.strip().lower() in ('c', 'call'): return 'call' if rawtxt.strip().lower() in ('p', 'put'): return 'put' raise ValueError('option type must be call or put') def _show_track_entries(entries): for entry in entries: print('') _show_track_entry(entry) def _show_track_entry(entry): print('Underlying: {}'.format(entry['Underlying'])) print('Opt_Type: {}'.format(entry['Opt_Type'])) print('Expiry: {}'.format(entry['Expiry'].strftime('%Y-%m-%d'))) print('Strike: {:.2f}'.format(entry['Strike'])) def _delentries(entries, collname, logger, client): logger.info("removing {} record(s) from collection '{}'".format(len(entries), collname)) coll = getcoll(client, collname) total_deleted = 0 for entry in entries: n_deleted = delete_many(logger, coll, entry) if n_deleted < 1: logger.warn('record to be deleted not found: {}'.format(entry)) total_deleted += n_deleted if total_deleted == len(entries): msg = '{} record(s) deleted'.format(total_deleted) print(msg) else: msg = '{} records queued for deletion but {} records were deleted!'.format(len(entries), total_deleted) logger.warn(msg) print('WARNING: {}'.format(msg)) print('Did you verify that the records to be deleted were actually present?') def _saveentries(entries, collname, logger, client): msg = 'Saving {} entries'.format(len(entries)) print(msg) 
logger.info(msg) coll = getcoll(client, collname) try: n_inserted = insert_many(logger, coll, entries) except BulkWriteError: print('\nERROR writing to database! Entries not saved!') print('Are you trying to enter duplicate records?') else: print('{} records saved'.format(n_inserted)) def _show_tracked(tz, underlying, logger, client): c_opts = CodecOptions(tz_aware=True) trackcoll = getcoll(client, 'track', codec_options=c_opts) print('\nEntries for {}:\n'.format(underlying)) for record in trackcoll.find({'Underlying': underlying}): _show_tracked_record(tz, record) def _show_tracked_record(tz, record): print('Opt_Type: {}'.format(record['Opt_Type'])) print('Expiry: {}'.format(record['Expiry'].astimezone(tz).strftime('%Y-%m-%d'))) print('Strike: {:.2f}\n'.format(record['Strike'])) def _get_dgbentries(underlying, straddleexp, straddlestrike, farexp, distance): entries = [] farstrikes = {'call': straddlestrike + distance, 'put': straddlestrike - distance} for key in farstrikes: # straddle entries.append({'Underlying': underlying, 'Opt_Type': key, 'Expiry': straddleexp, 'Strike': straddlestrike}) # long-term spread entries.append({'Underlying': underlying, 'Opt_Type': key, 'Expiry': farexp, 'Strike': farstrikes[key]}) return entries def _is_fromfile(): if input('Get list from file, 1 equity per line (y/n)? ').strip().lower() == 'y': return True return False def _eqs_fromblob(eqblob): return sorted(map(_fmt_eq, eqblob.split(','))) def _fmt_eq(rawtxt): return rawtxt.strip().upper() def _eqs_fromfile(fname): equities = [] with open(fname, 'r') as infile: equities = infile.readlines() return sorted(map(_fmt_eq, equities)) def _get_find_entries(equities, spread_type): return [{'eq': equity, 'spread': spread_type} for equity in equities]
mit
loljoho-old/ainu
pyaib/config.py
2
3302
#!/usr/bin/env python
#
# Copyright 2013 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE(review): this module uses Python 2-only syntax (`except X, e`, the
# `file()` builtin) and will not run under Python 3 as-is.

from __future__ import (absolute_import, division,
                        print_function, unicode_literals)

import sys
import os
import yaml

from .util import data


class Config(object):
    """Load the bot's YAML configuration, following CONFIG.load directives.

    The main config file is located by searching configPath plus sys.path;
    sub-configs named in its 'config.load' mapping are loaded into the
    sections they are keyed by.
    """

    def __init__(self, configFile=None, configPath=None):
        print("Config Module Loaded.")
        if configFile is None:
            raise RuntimeError("YOU MUST PASS 'configFile' DURING BOT INIT")
        (config, searchpaths) = self.__load(configFile, configPath)
        if config is None:
            msg = ("You need a valid main config (searchpaths: %s)"
                   % searchpaths)
            raise RuntimeError(msg)

        #Wrap the config dict
        self.config = data.CaseInsensitiveObject(config)

        #Files can be loaded from the 'CONFIG' section
        #Load the load statement if any
        for section, file in self.config.setdefault('config.load', {}).items():
            config = self.__load(file, [configPath,
                                        self.config.get('config.path')])
            #Badly syntax configs will be empty
            if config is None:
                config = {}
            self.config.set(section, config)

    #Attempt to load a config file name print exceptions
    def __load(self, configFile, path=None):
        """Return (parsed-yaml-or-None, searchpaths) for configFile."""
        data = None
        (filepath, searchpaths) = self.__findfile(configFile, path)
        if filepath:  # If the file is found lets try to load it
            try:
                # NOTE(review): the file handle from file() is never closed —
                # harmless for a one-shot loader but worth a `with` block.
                data = yaml.safe_load(file(filepath, 'r'))
                print("Loaded Config from %s." % configFile)
            except yaml.YAMLError, exc:
                print("Error in configuration file (%s): %s" % (filepath, exc))
                if hasattr(exc, 'problem_mark'):
                    mark = exc.problem_mark
                    print("Error position: (%s:%s)" % (mark.line + 1,
                                                       mark.column + 1))
        return (data, searchpaths)

    #Find the requested file in the path (for PARs)
    #If configFile is a list then do lookup for each
    #First Found is returned
    def __findfile(self, configFile, path=None):
        """Walk every search path and return (first-match-or-None, searchpaths)."""
        searchpaths = []
        if isinstance(path, list):
            searchpaths.extend(path)  # Optional Config path
        elif path:
            searchpaths.append(path)
        searchpaths.extend(sys.path)
        for path in searchpaths:
            # sys.path entries can be files (eggs/zips); fall back to their dir.
            if not os.path.isdir(path):
                path = os.path.dirname(path)
            if os.path.isdir(path):
                for root, dirs, files in os.walk(path):
                    if configFile in files:
                        return (os.path.join(root, configFile), searchpaths)
        return (None, searchpaths)
apache-2.0
gogoair/foremast
tests/iam/test_iam_create.py
1
4229
#   Foremast - Pipeline Tooling
#
#   Copyright 2018 Gogo, LLC
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
"""Test IAM Policy construction."""
import json
from unittest import mock

from foremast.iam.create_iam import create_iam_resources
from foremast.utils import get_template

EC2_TEMPLATE_NAME = 'infrastructure/iam/trust/ec2_role.json.j2'
LAMBDA_TEMPLATE_NAME = 'infrastructure/iam/trust/lambda_role.json.j2'


@mock.patch('foremast.iam.create_iam.attach_profile_to_role')
@mock.patch('foremast.iam.create_iam.boto3.session.Session')
@mock.patch('foremast.iam.create_iam.construct_policy')
@mock.patch('foremast.iam.create_iam.get_details')
@mock.patch('foremast.iam.create_iam.get_properties')
@mock.patch('foremast.iam.create_iam.resource_action')
def test_create_iam_resources(resource_action, get_properties, get_details, construct_policy, session,
                              attach_profile_to_role):
    """Check basic functionality.

    All AWS interaction is mocked; only call counts and arguments are checked.
    """
    get_details.return_value.iam.return_value = {'group': 1, 'policy': 2, 'profile': 3, 'role': 4, 'user': 5}
    get_properties.return_value = {'type': 'ec2'}

    assert create_iam_resources(env='narnia', app='lion/aslan')

    # One resource_action per IAM object created/updated.
    assert resource_action.call_count == 6
    session.assert_called_with(profile_name='narnia')
    get_details.assert_called_with(env='narnia', app='lion/aslan')
    get_properties.assert_called_with(env='pipeline')
    construct_policy.assert_called_with(
        app='lion/aslan', group=1, env='narnia', pipeline_settings=get_properties.return_value)


@mock.patch('foremast.iam.create_iam.attach_profile_to_role')
@mock.patch('foremast.iam.create_iam.boto3.session.Session')
@mock.patch('foremast.iam.create_iam.construct_policy')
@mock.patch('foremast.iam.create_iam.get_details')
@mock.patch('foremast.iam.create_iam.get_properties')
@mock.patch('foremast.iam.create_iam.get_template')
@mock.patch('foremast.iam.create_iam.resource_action')
def test_iam_role_policy(resource_action, get_template, get_properties, get_details, construct_policy, session,
                         attach_profile_to_role):
    """IAM Role Policy should match deployment type."""
    get_properties.return_value = {'type': 'ec2'}
    get_details.return_value.iam.return_value = {'group': 1, 'policy': 2, 'profile': 3, 'role': 4, 'user': 5}

    assert create_iam_resources()

    # An ec2 deployment must render the EC2 trust-relationship template...
    get_template.assert_called_with(EC2_TEMPLATE_NAME, formats=get_details())

    # ...and feed it to create_role as the AssumeRolePolicyDocument.
    calls = [
        mock.call(
            mock.ANY,
            action='create_role',
            log_format=mock.ANY,
            RoleName=mock.ANY,
            AssumeRolePolicyDocument=get_template.return_value)
    ]
    resource_action.assert_has_calls(calls)


def test_ec2_iam_policy():
    """Check template for proper format."""
    ec2_json = get_template(EC2_TEMPLATE_NAME)
    ec2_dict = json.loads(ec2_json)

    assert all(key in ec2_dict for key in ('Version', 'Statement'))
    assert len(ec2_dict['Statement']) == 1

    only_statement = ec2_dict['Statement'][0]
    assert only_statement['Action'] == 'sts:AssumeRole'
    assert only_statement['Effect'] == 'Allow'
    assert only_statement['Principal']['Service'] == 'ec2.amazonaws.com'


def test_lambda_iam_policy():
    """Check Lambda Trust Relationship template format."""
    lambda_json = get_template(LAMBDA_TEMPLATE_NAME)
    lambda_dict = json.loads(lambda_json)

    assert all(key in lambda_dict for key in ('Version', 'Statement'))
    assert len(lambda_dict['Statement']) == 1

    only_statement = lambda_dict['Statement'][0]
    assert only_statement['Action'] == 'sts:AssumeRole'
    assert only_statement['Effect'] == 'Allow'
    assert only_statement['Principal']['Service'] == 'lambda.amazonaws.com'
apache-2.0
tiagochiavericosta/edx-platform
lms/djangoapps/courseware/features/conditional.py
102
4723
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name

# Lettuce BDD step definitions for the courseware Conditional module:
# build a course with a Conditional gated on a problem or poll, then assert
# that the hidden contents appear only after the condition is satisfied.

from lettuce import world, steps
from nose.tools import assert_in, assert_true  # pylint: disable=no-name-in-module

from common import i_am_registered_for_the_course, visit_scenario_item
from problems_setup import add_problem_to_course, answer_problem


@steps
class ConditionalSteps(object):
    COURSE_NUM = 'test_course'

    def setup_conditional(self, step, condition_type, condition, cond_value):
        r'that a course has a Conditional conditioned on (?P<condition_type>\w+) (?P<condition>\w+)=(?P<cond_value>\w+)$'
        i_am_registered_for_the_course(step, self.COURSE_NUM)

        world.scenario_dict['VERTICAL'] = world.ItemFactory(
            parent_location=world.scenario_dict['SECTION'].location,
            category='vertical',
            display_name="Test Vertical",
        )

        world.scenario_dict['WRAPPER'] = world.ItemFactory(
            parent_location=world.scenario_dict['VERTICAL'].location,
            category='wrapper',
            display_name="Test Poll Wrapper"
        )

        # The condition source is either a string-response problem or a poll.
        if condition_type == 'problem':
            world.scenario_dict['CONDITION_SOURCE'] = add_problem_to_course(self.COURSE_NUM, 'string')
        elif condition_type == 'poll':
            world.scenario_dict['CONDITION_SOURCE'] = world.ItemFactory(
                parent_location=world.scenario_dict['WRAPPER'].location,
                category='poll_question',
                display_name='Conditional Poll',
                data={
                    'question': 'Is this a good poll?',
                    'answers': [
                        {'id': 'yes', 'text': 'Yes, of course'},
                        {'id': 'no', 'text': 'Of course not!'}
                    ],
                }
            )
        else:
            raise Exception("Unknown condition type: {!r}".format(condition_type))

        # The gating attribute (e.g. attempted=True) lives in xml_attributes.
        metadata = {
            'xml_attributes': {
                condition: cond_value
            }
        }

        world.scenario_dict['CONDITIONAL'] = world.ItemFactory(
            parent_location=world.scenario_dict['WRAPPER'].location,
            category='conditional',
            display_name="Test Conditional",
            metadata=metadata,
            sources_list=[world.scenario_dict['CONDITION_SOURCE'].location],
        )

        # NOTE(review): the opening <div> is closed with </p> in this markup —
        # preserved as-is since it is runtime data, but confirm it's intended.
        world.ItemFactory(
            parent_location=world.scenario_dict['CONDITIONAL'].location,
            category='html',
            display_name='Conditional Contents',
            data='<html><div class="hidden-contents">Hidden Contents</p></html>'
        )

    def setup_problem_attempts(self, step, not_attempted=None):
        r'that the conditioned problem has (?P<not_attempted>not )?been attempted$'
        visit_scenario_item('CONDITION_SOURCE')

        # Only attempt the problem when the step did NOT say "not been attempted".
        if not_attempted is None:
            answer_problem(self.COURSE_NUM, 'string', True)
            world.css_click("button.check")

    def when_i_view_the_conditional(self, step):
        r'I view the conditional$'
        visit_scenario_item('CONDITIONAL')
        # Wait until the Conditional XBlock JS has finished initializing.
        world.wait_for_js_variable_truthy('$(".xblock-student_view[data-type=Conditional]").data("initialized")')

    def check_visibility(self, step, visible):
        r'the conditional contents are (?P<visible>\w+)$'
        world.wait_for_ajax_complete()

        assert_in(visible, ('visible', 'hidden'))

        if visible == 'visible':
            world.wait_for_visible('.hidden-contents')
            assert_true(world.css_visible('.hidden-contents'))
        else:
            # Hidden: the contents are absent and the gating message is shown.
            assert_true(world.is_css_not_present('.hidden-contents'))
            assert_true(
                world.css_contains_text(
                    '.conditional-message',
                    'must be attempted before this will become visible.'
                )
            )

    def answer_poll(self, step, answer):
        r' I answer the conditioned poll "([^"]*)"$'
        visit_scenario_item('CONDITION_SOURCE')
        world.wait_for_js_variable_truthy('$(".xblock-student_view[data-type=Poll]").data("initialized")')
        world.wait_for_ajax_complete()

        # Map the poll answer id from the scenario to its display text.
        answer_text = [
            poll_answer['text']
            for poll_answer
            in world.scenario_dict['CONDITION_SOURCE'].answers
            if poll_answer['id'] == answer
        ][0]

        text_selector = '.poll_answer .text'

        poll_texts = world.retry_on_exception(
            lambda: [elem.text for elem in world.css_find(text_selector)]
        )

        for idx, poll_text in enumerate(poll_texts):
            if poll_text == answer_text:
                world.css_click(text_selector, index=idx)
                return


ConditionalSteps()
agpl-3.0
krother/maze_run
03_semantic_errors/generate_maze.py
4
1783
"""Random maze generator -- code for chapter 03 (Semantic Errors).

A maze is represented as a string of '#' (wall) and '.' (corridor)
characters, one row of cells per text line.
"""

import random

# Default maze dimensions used elsewhere in the chapter.
XMAX, YMAX = 19, 16


def create_grid_string(dots, xsize, ysize):
    """Return an xsize * ysize grid string; cells listed in *dots* are '.'."""
    rows = []
    for row in range(ysize):
        cells = "".join("." if (col, row) in dots else "#" for col in range(xsize))
        rows.append(cells + "\n")
    return "".join(rows)


def get_all_dot_positions(xsize, ysize):
    """Return every interior (x, y) position of an xsize * ysize grid."""
    interior = []
    for col in range(1, xsize - 1):
        for row in range(1, ysize - 1):
            interior.append((col, row))
    return interior


def get_neighbors(x, y):
    """Return the 8 positions surrounding (x, y)."""
    offsets = [
        (0, -1), (0, 1), (-1, 0), (1, 0),
        (-1, -1), (1, -1), (-1, 1), (1, 1),
    ]
    return [(x + dx, y + dy) for dx, dy in offsets]


def generate_dot_positions(xsize, ysize):
    """Pick corridor positions for a random maze.

    Visits the interior cells in random order and carves a corridor in a
    cell whenever fewer than 5 of its 8 neighbors are corridors already.
    """
    candidates = get_all_dot_positions(xsize, ysize)
    dots = set()
    while candidates:
        cell = random.choice(candidates)
        carved_neighbors = sum(nb in dots for nb in get_neighbors(*cell))
        if carved_neighbors < 5:
            dots.add(cell)
        candidates.remove(cell)
    return dots


def create_maze(xsize, ysize):
    """Return a random xsize * ysize maze as a grid string."""
    return create_grid_string(generate_dot_positions(xsize, ysize), xsize, ysize)


if __name__ == '__main__':
    # Small demos of each helper, then a full random maze.
    sample = set(((1, 1), (1, 2), (1, 3), (2, 2), (3, 1), (3, 2), (3, 3)))
    print(create_grid_string(sample, 5, 5))

    print(create_grid_string(get_all_dot_positions(5, 5), 5, 5))

    print(create_grid_string(get_neighbors(3, 2), 5, 5))

    print(create_maze(12, 7))
mit
joone/chromium-crosswalk
tools/idl_parser/idl_ppapi_lexer.py
81
2012
#!/usr/bin/env python # Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Lexer for PPAPI IDL The lexer uses the PLY library to build a tokenizer which understands both WebIDL and Pepper tokens. WebIDL, and WebIDL regular expressions can be found at: http://www.w3.org/TR/2012/CR-WebIDL-20120419/ PLY can be found at: http://www.dabeaz.com/ply/ """ from idl_lexer import IDLLexer # # IDL PPAPI Lexer # class IDLPPAPILexer(IDLLexer): # Token definitions # # These need to be methods for lexer construction, despite not using self. # pylint: disable=R0201 # Special multi-character operators def t_LSHIFT(self, t): r'<<' return t def t_RSHIFT(self, t): r'>>' return t def t_INLINE(self, t): r'\#inline (.|\n)*?\#endinl.*' self.AddLines(t.value.count('\n')) return t # Return a "preprocessor" inline block def __init__(self): IDLLexer.__init__(self) self._AddTokens(['INLINE', 'LSHIFT', 'RSHIFT']) self._AddKeywords(['label', 'struct']) # Add number types self._AddKeywords(['char', 'int8_t', 'int16_t', 'int32_t', 'int64_t']) self._AddKeywords(['uint8_t', 'uint16_t', 'uint32_t', 'uint64_t']) self._AddKeywords(['double_t', 'float_t']) # Add handle types self._AddKeywords(['handle_t', 'PP_FileHandle']) # Add pointer types (void*, char*, const char*, const void*) self._AddKeywords(['mem_t', 'str_t', 'cstr_t', 'interface_t']) # Remove JS types self._DelKeywords(['boolean', 'byte', 'ByteString', 'Date', 'DOMString', 'double', 'float', 'long', 'object', 'octet', 'Promise', 'RegExp', 'short', 'unsigned']) # If run by itself, attempt to build the lexer if __name__ == '__main__': lexer = IDLPPAPILexer() lexer.Tokenize(open('test_parser/inline_ppapi.idl').read()) for tok in lexer.GetTokens(): print '\n' + str(tok)
bsd-3-clause
kumajaya/android_kernel_samsung_espresso10
tools/perf/scripts/python/sched-migration.py
11215
11670
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
#
# NOTE: this is a Python 2 script (print statements, UserList import,
# xrange, integer '/' division) run inside perf's embedded interpreter.

import os
import sys

from collections import defaultdict
from UserList import UserList

# Make perf's trace helper modules importable, both installed and in-tree.
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from SchedGui import *


# pid -> comm, filled in as sched_switch events are seen; pid 0 is idle.
threads = { 0 : "idle"}

def thread_name(pid):
    # Human-readable "comm:pid" label for a known pid.
    return "%s:%d" % (threads[pid], pid)

# The Runqueue*Event classes below describe the last thing that happened on a
# runqueue; color() gives the RGB used to paint the event in the GUI.

class RunqueueEventUnknown:
    @staticmethod
    def color():
        return None

    def __repr__(self):
        return "unknown"

class RunqueueEventSleep:
    @staticmethod
    def color():
        return (0, 0, 0xff)

    def __init__(self, sleeper):
        self.sleeper = sleeper

    def __repr__(self):
        return "%s gone to sleep" % thread_name(self.sleeper)

class RunqueueEventWakeup:
    @staticmethod
    def color():
        return (0xff, 0xff, 0)

    def __init__(self, wakee):
        self.wakee = wakee

    def __repr__(self):
        return "%s woke up" % thread_name(self.wakee)

class RunqueueEventFork:
    @staticmethod
    def color():
        return (0, 0xff, 0)

    def __init__(self, child):
        self.child = child

    def __repr__(self):
        return "new forked task %s" % thread_name(self.child)

class RunqueueMigrateIn:
    @staticmethod
    def color():
        return (0, 0xf0, 0xff)

    def __init__(self, new):
        self.new = new

    def __repr__(self):
        return "task migrated in %s" % thread_name(self.new)

class RunqueueMigrateOut:
    @staticmethod
    def color():
        return (0xff, 0, 0xff)

    def __init__(self, old):
        self.old = old

    def __repr__(self):
        return "task migrated out %s" % thread_name(self.old)

class RunqueueSnapshot:
    """Immutable-ish snapshot of one CPU runqueue: the set of runnable pids
    plus the event that produced this state.  Mutating operations return a
    new snapshot (or self when nothing changed, which callers test with 'is')."""

    # NOTE(review): mutable/shared default arguments -- every default-built
    # snapshot shares the same event instance.  Appears harmless here since
    # tasks is copied to a tuple and the default event is stateless.
    def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
        self.tasks = tuple(tasks)
        self.event = event

    def sched_switch(self, prev, prev_state, next):
        event = RunqueueEventUnknown()

        # Both tasks stay on the runqueue: no state change to record.
        if taskState(prev_state) == "R" and next in self.tasks \
            and prev in self.tasks:
            return self

        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)

        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)

        if next not in next_tasks:
            next_tasks.append(next)

        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]

        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        # Task already present: only record the event (mutates self in place,
        # unlike the other paths which build a fresh snapshot).
        if new in self.tasks:
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])

        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
            Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        ret = self.tasks.__repr__()
        # NOTE(review): origin_tostring() is not defined anywhere in this
        # file -- calling repr() on a snapshot would raise AttributeError.
        # Presumably leftover from an earlier revision; confirm before use.
        ret += self.origin_tostring()

        return ret

class TimeSlice:
    """State of all runqueues over one [start, end) interval.  A new slice is
    chained from the previous one and starts as a copy of its state."""

    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()
        else:
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)

        # Identity check: snapshot methods return self when nothing changed.
        if old_rq is new_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        if old_cpu == new_cpu:
            return
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)

        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)

        ts_list.append(self)

        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)

        if new_rq is old_rq:
            return
        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        # Close this slice at time t and open the successor slice.
        self.end = t
        return TimeSlice(t, self)

class TimeSliceList(UserList):
    """Ordered list of TimeSlice objects plus GUI callbacks used by SchedGui."""

    # NOTE(review): mutable default argument shared across default-built
    # instances; only one instance is created in this script.
    def __init__(self, arg = []):
        self.data = arg

    def get_time_slice(self, ts):
        # Return the (new) current slice starting at timestamp ts.
        if len(self.data) == 0:
            slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            slice = self.data[-1].next(ts)
        return slice

    def find_time_slice(self, ts):
        # Binary search for the slice containing timestamp ts; -1 if none.
        # '/' is Python 2 integer division here.
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False

            i = (end + start) / 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue

            if self.data[i].end < ts:
                start = i

            elif self.data[i].start > ts:
                end = i

        return found

    def set_root_win(self, win):
        self.root_win = win

    def mouse_down(self, cpu, t):
        # Build a text summary of the clicked (cpu, time) cell for the GUI.
        idx = self.find_time_slice(t)
        if idx == -1:
            return
        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        # note: loop variable shadows the 't' timestamp parameter (harmless,
        # t is not used after this point).
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)

        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        # Paint one cpu's cell: whiter = idle, redder = higher share of load.
        rq = slice.rqs[cpu]

        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0

        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)

        top_color = None

        if cpu in slice.event_cpus:
            top_color = rq.event.color()

        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        # Repaint every slice overlapping [start, end].
        i = self.find_time_slice(start)
        if i == -1:
            return

        for i in xrange(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return

            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        # (first start, last end) of the recorded trace, or (0, 0) if empty.
        if len(self.data) == 0:
            return (0, 0)

        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        # Highest cpu index seen in the final slice (GUI row count).
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu


class SchedEventProxy:
    """Receives raw perf sched events and feeds them into the time slices."""

    def __init__(self):
        # cpu -> pid currently believed to be running there (-1 = unknown).
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio,
            prev_state, next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
            we logged. Otherwise we may have missed traces """

        on_cpu_task = self.current_tsk[headers.cpu]

        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)

        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid

        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)


def trace_begin():
    # perf calls this before the first event; create the global parser.
    global parser
    parser = SchedEventProxy()

def trace_end():
    # perf calls this after the last event; hand the data to the wx GUI.
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()

# The sched__* handlers below are named for the tracepoints perf dispatches
# to; unused ones are stubbed with 'pass' but must exist.

def sched__sched_stat_runtime(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass

def sched__sched_migrate_task(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio, orig_cpu, dest_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)

def sched__sched_switch(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    prev_comm, prev_pid, prev_prio, prev_state,
    next_comm, next_pid, next_prio):

    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio)

def sched__sched_wakeup_new(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio, success, target_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 1)

def sched__sched_wakeup(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio, success, target_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)

def sched__sched_wait_task(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass

def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    ret):
    pass

def sched__sched_kthread_stop(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid):
    pass

def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm):
    pass
gpl-2.0
xiaoyuanW/gem5
src/arch/x86/isa/insts/x87/arithmetic/multiplication.py
50
2165
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black

# Microcode definitions for the x87 multiplication instructions.  The string
# is consumed by gem5's microcode assembler; the instructions listed inside
# it are placeholders -- no microcode has been written for them yet.
microcode = '''
# FMUL
# FMULP
# FIMUL
'''
bsd-3-clause
lmacken/moksha
moksha/apps/feeds/moksha/widgets/feeds/widgets.py
1
6186
# This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`moksha.widgets.feedtree` - A dynamic feed tree
====================================================

There are currently two implementations of this application, an `ajax`
version and a `live` version.  The ajax version makes a new request to our
WSGI server each time, where as the `live` implementation communicates over
a persistent Stomp-driven Orbited TCPSocket.

The live widget will automatically connect up to a unique message topic
that it uses to communicate with the Moksha Feed Consumer in the Moksha Hub.
It also listens for changes in the feeds that it is viewing.

.. widgetbrowser:: moksha.widgets.feedtree.moksha_feedreader
   :tabs: demo, source, template
   :size: x-large

.. moduleauthor:: Luke Macken <lmacken@redhat.com>
"""

from tg import config
from tw.api import js_callback, Widget, JSLink, CSSLink
from tw.jquery import jquery_js, jQuery
from tw.jquery.dynatree import Dynatree
from uuid import uuid4

from moksha.api.widgets.live import LiveWidget


class MokshaAjaxFeedTree(Dynatree):
    # Ajax variant: every node activation triggers a fresh HTTP request.
    title = 'Moksha Ajax Feed Tree'
    rootVisible = True
    persist = True
    # Tree is populated lazily from the feeds controller, starting at 'root'.
    initAjax = {
        'url': '/apps/feeds/init_tree',
        'data': {'key': 'root'}
    }
    # Clicking a feed loads its entries into the top pane.  The .replace()
    # strips newlines so the JS survives being embedded in an attribute.
    onActivate = js_callback("""
    function(dtnode) {
        $('#TopPane').load('/apps/feeds/get_entries?key=' + dtnode.data.key.replace(/ /, ''));
    }
    """.replace('\n', ''))


class MokshaAjaxFeedEntriesTree(Dynatree):
    rootVisible = False
    persist = True
    # Clicking an entry loads its rendered content into the bottom pane.
    onActivate = js_callback("""function(dtnode) {
        $('#BottomPane').load('/apps/feeds/get_entry?key=' + dtnode.data.key);
    }""")


class MokshaLiveFeedTree(Dynatree):
    # Live variant: node activations publish to the 'moksha.feeds' topic over
    # the persistent Stomp/Orbited socket instead of making HTTP requests.
    title = 'Moksha Live Feed Tree'
    rootVisible = True
    persist = True
    fx = {'height': 'toggle', 'duration': 200}

    initAjax = {
        'url': '/apps/feeds/init_tree',
        'data': {'key': 'root'}
    }

    # moksha_feed_topic is a per-render uuid (see update_params) used as the
    # reply topic for the Feed Consumer in the Moksha Hub.
    onActivate = js_callback("function(dtnode) { moksha.send_message('moksha.feeds', {action: 'get_feed', 'key': dtnode.data.key, topic: moksha_feed_topic}); }")

    #def __init__(self, *args, **kw):
    #    Dynatree.__init__(self, *args, **kw)
    #    self.template += """
    #       <script>
    #         var moksha_feed_topic = "${topic}";
    #       </script>
    #    """
    #    #LiveWidget.__init__(self, *args, **kw)

    def update_params(self, d):
        # the unique queue to use over our stomp TCPSocket
        d['topic'] = str(uuid4())
        Dynatree.update_params(self, d)
        # apparently the dynatree calls our live widget's update_params for us
        #LiveWidget.update_params(self, d)

    #onLazyRead = js_callback("""function(dtnode) {
    #    dtnode.appendAjax({url: '/apps/feeds/get_feed',
    #                       data: {key: dtnode.data.key, mode: 'all'},
    #                       cache: false
    #    });
    #}""".replace('\n', ''))


class MokshaLiveFeedEntriesTree(Dynatree):
    rootVisible = False
    persist = True
    onActivate = js_callback("""
        function(dtnode) {
            moksha.send_message('moksha.feeds', {
                'action': 'get_entry',
                'key': dtnode.data.key,
                topic: moksha_feed_topic
            });
            /* Unsubscribe from current feed, subscribe to new one */
        }
    """.replace('\n', ''))


## Load our feed tree widgets.
# Which implementation is used is decided once at import time from the
# 'moksha.feedtree.engine' config option ('live' by default).
feedtree_engine = config.get('moksha.feedtree.engine', 'live')
if feedtree_engine == 'live':  # Live widgets
    feed_tree = MokshaLiveFeedTree('feed_tree')
    feed_entries_tree = MokshaLiveFeedEntriesTree('feed_entries_tree')
elif feedtree_engine == 'ajax':  # Ajax widgets
    feed_tree = MokshaAjaxFeedTree('feed_tree')
    feed_entries_tree = MokshaAjaxFeedEntriesTree('feed_entries_tree')

# jQuery splitter plugin powering the resizable panes of the reader.
splitter_js = JSLink(filename='static/splitter.js',
                     javascript=[jquery_js],
                     modname=__name__)
splitter_css = CSSLink(filename='static/main.css',
                       media='all',
                       modname=__name__)


class MokshaFeedReaderWidget(LiveWidget):
    # Three-pane feed reader: feed tree on the left, entries top-right,
    # entry content bottom-right.  Listens on a unique per-render topic.
    name = 'Moksha Feed Reader'
    params = ['topic']
    topic = 'moksha.feeds'  # will get replaced by a unique uuid at render-time
    template = 'mako:moksha.widgets.feeds.templates.feedreader'
    children = [feed_tree, feed_entries_tree]
    javascript = [splitter_js]
    css = [splitter_css]
    container_options = {
        'top': 50, 'left': 50, 'height': 600,
        'width': 890, 'icon': 'browser.png',
    }
    # Dispatch hub messages by their 'action' field.
    onmessage = """
        if (json.action == 'get_feed') {
            var tree = $("#moksha_feedreader_feed_entries_tree").dynatree("getRoot");
            tree.removeChildren();
            tree.append(json.entries);
        } else if (json.action == 'get_entry') {
            $('#BottomPane').html(json.content);
        } else if (json.action == 'new_entry') {
            /* TODO */
            moksha.debug('new_entry!');
        }
    """

    def update_params(self, d):
        # Fresh reply topic per render, then wire up the two splitters.
        d['topic'] = str(uuid4())
        super(MokshaFeedReaderWidget, self).update_params(d)
        self.add_call(jQuery('#' + d.id).splitter({
            'splitVertical': True,
            'outline': True,
            'sizeLeft': True,
            'anchorToWindow': True,
            'accessKey': "I",
        }))
        self.add_call(jQuery('#RightPane').splitter({
            'splitHorizontal': True,
            'sizeTop': True,
            'accessKey': "H",
        }))


moksha_feedreader = MokshaFeedReaderWidget('moksha_feedreader')
apache-2.0
TheMOOCAgency/edx-platform
common/test/acceptance/pages/lms/dashboard_search.py
16
1161
""" Dashboard search """ from bok_choy.page_object import PageObject from common.test.acceptance.pages.lms import BASE_URL class DashboardSearchPage(PageObject): """ Dashboard page featuring a search form """ search_bar_selector = '#dashboard-search-bar' url = "{base}/dashboard".format(base=BASE_URL) @property def search_results(self): """ search results list showing """ return self.q(css='#dashboard-search-results') def is_browser_on_page(self): """ did we find the search bar in the UI """ return self.q(css=self.search_bar_selector).present def enter_search_term(self, text): """ enter the search term into the box """ self.q(css=self.search_bar_selector + ' input[type="text"]').fill(text) def search(self): """ execute the search """ self.q(css=self.search_bar_selector + ' [type="submit"]').click() self.wait_for_element_visibility('.search-info', 'Search results are shown') def search_for_term(self, text): """ Search and return results """ self.enter_search_term(text) self.search()
agpl-3.0
cntnboys/cmput410-project
venv/lib/python2.7/site-packages/pip/vcs/git.py
85
7833
from __future__ import absolute_import

import logging
import tempfile
import os.path

from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request

from pip.utils import call_subprocess
from pip.utils import display_path, rmtree
from pip.vcs import vcs, VersionControl

urlsplit = urllib_parse.urlsplit
urlunsplit = urllib_parse.urlunsplit


logger = logging.getLogger(__name__)


class Git(VersionControl):
    """Git backend for pip's VCS support: clone/update/export repositories
    referenced by git+... requirement URLs by shelling out to the git CLI."""

    name = 'git'
    dirname = '.git'
    repo_name = 'clone'
    schemes = (
        'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
    )

    def __init__(self, url=None, *args, **kwargs):

        # Works around an apparent Git bug
        # (see http://article.gmane.org/gmane.comp.version-control.git/146500)
        # by percent-decoding the path of git+file:// URLs and normalizing
        # backslashes before handing the URL to the base class.
        if url:
            scheme, netloc, path, query, fragment = urlsplit(url)
            if scheme.endswith('file'):
                initial_slashes = path[:-len(path.lstrip('/'))]
                newpath = (
                    initial_slashes +
                    urllib_request.url2pathname(path)
                    .replace('\\', '/').lstrip('/')
                )
                url = urlunsplit((scheme, netloc, newpath, query, fragment))
                after_plus = scheme.find('+') + 1
                url = scheme[:after_plus] + urlunsplit(
                    (scheme[after_plus:], netloc, newpath, query, fragment),
                )

        super(Git, self).__init__(url, *args, **kwargs)

    def export(self, location):
        """Export the Git repository at the url to the destination location"""
        # Clone into a temp dir, then use checkout-index to copy the worktree
        # (without .git) into `location`; the temp clone is always removed.
        temp_dir = tempfile.mkdtemp('-export', 'pip-')
        self.unpack(temp_dir)
        try:
            if not location.endswith('/'):
                location = location + '/'
            call_subprocess(
                [self.cmd, 'checkout-index', '-a', '-f', '--prefix', location],
                filter_stdout=self._filter, show_stdout=False, cwd=temp_dir)
        finally:
            rmtree(temp_dir)

    def check_rev_options(self, rev, dest, rev_options):
        """Check the revision options before checkout to compensate that tags
        and branches may need origin/ as a prefix.
        Returns the SHA1 of the branch or tag if found.
        """
        revisions = self.get_refs(dest)

        origin_rev = 'origin/%s' % rev
        if origin_rev in revisions:
            # remote branch
            return [revisions[origin_rev]]
        elif rev in revisions:
            # a local tag or branch name
            return [revisions[rev]]
        else:
            # Not a known ref: assume `rev` is a commit hash and keep the
            # caller's options unchanged.
            logger.warning(
                "Could not find a tag or branch '%s', assuming commit.", rev,
            )
            return rev_options

    def switch(self, dest, url, rev_options):
        # Point the existing clone at a new origin URL, then check out the
        # requested revision.
        call_subprocess(
            [self.cmd, 'config', 'remote.origin.url', url], cwd=dest)
        call_subprocess(
            [self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)

        self.update_submodules(dest)

    def update(self, dest, rev_options):
        # First fetch changes from the default remote
        call_subprocess([self.cmd, 'fetch', '-q'], cwd=dest)
        # Then reset to wanted revision (maybe even origin/master)
        if rev_options:
            rev_options = self.check_rev_options(
                rev_options[0], dest, rev_options,
            )
        call_subprocess(
            [self.cmd, 'reset', '--hard', '-q'] + rev_options, cwd=dest,
        )
        #: update submodules
        self.update_submodules(dest)

    def obtain(self, dest):
        # Clone the repository (prompting/cleaning via check_destination if
        # dest already exists), then check out the requested revision.
        url, rev = self.get_url_rev()
        if rev:
            rev_options = [rev]
            rev_display = ' (to %s)' % rev
        else:
            rev_options = ['origin/master']
            rev_display = ''
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.info(
                'Cloning %s%s to %s', url, rev_display, display_path(dest),
            )
            call_subprocess([self.cmd, 'clone', '-q', url, dest])

            if rev:
                rev_options = self.check_rev_options(rev, dest, rev_options)
                # Only do a checkout if rev_options differs from HEAD
                if not self.get_revision(dest).startswith(rev_options[0]):
                    call_subprocess(
                        [self.cmd, 'checkout', '-q'] + rev_options,
                        cwd=dest,
                    )
            #: repo may contain submodules
            self.update_submodules(dest)

    def get_url(self, location):
        # origin URL of an existing clone at `location`.
        url = call_subprocess(
            [self.cmd, 'config', 'remote.origin.url'],
            show_stdout=False, cwd=location)
        return url.strip()

    def get_revision(self, location):
        # SHA1 of HEAD for the clone at `location`.
        current_rev = call_subprocess(
            [self.cmd, 'rev-parse', 'HEAD'], show_stdout=False, cwd=location)
        return current_rev.strip()

    def get_refs(self, location):
        """Return map of named refs (branches or tags) to commit hashes."""
        output = call_subprocess([self.cmd, 'show-ref'],
                                 show_stdout=False, cwd=location)
        rv = {}
        for line in output.strip().splitlines():
            commit, ref = line.split(' ', 1)
            ref = ref.strip()
            ref_name = None
            # Normalize refs/remotes|heads|tags/<name> down to <name>;
            # other ref namespaces are ignored.
            if ref.startswith('refs/remotes/'):
                ref_name = ref[len('refs/remotes/'):]
            elif ref.startswith('refs/heads/'):
                ref_name = ref[len('refs/heads/'):]
            elif ref.startswith('refs/tags/'):
                ref_name = ref[len('refs/tags/'):]
            if ref_name is not None:
                rv[ref_name] = commit.strip()
        return rv

    def get_src_requirement(self, dist, location, find_tags):
        # Build an editable-style requirement string
        # 'git+<repo>@<rev>#egg=<name>' describing the checkout at `location`.
        repo = self.get_url(location)
        if not repo.lower().startswith('git:'):
            repo = 'git+' + repo
        egg_project_name = dist.egg_name().split('-', 1)[0]
        if not repo:
            return None
        current_rev = self.get_revision(location)
        refs = self.get_refs(location)
        # refs maps names to commit hashes; we need the inverse
        # if multiple names map to a single commit, we pick the first one
        # alphabetically
        names_by_commit = {}
        for ref, commit in sorted(refs.items()):
            if commit not in names_by_commit:
                names_by_commit[commit] = ref

        if current_rev in names_by_commit:
            # It's a tag or branch.
            name = names_by_commit[current_rev]
            full_egg_name = (
                '%s-%s' % (egg_project_name, self.translate_egg_surname(name))
            )
        else:
            full_egg_name = '%s-dev' % egg_project_name

        return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)

    def get_url_rev(self):
        """
        Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
        That's required because although they use SSH they sometimes don't
        work with a ssh:// scheme (e.g. Github). But we need a scheme for
        parsing. Hence we remove it again afterwards and return it as a stub.
        """
        if '://' not in self.url:
            assert 'file:' not in self.url
            self.url = self.url.replace('git+', 'git+ssh://')
            url, rev = super(Git, self).get_url_rev()
            url = url.replace('ssh://', '')
        else:
            url, rev = super(Git, self).get_url_rev()

        return url, rev

    def update_submodules(self, location):
        # No-op for repos without submodules.
        if not os.path.exists(os.path.join(location, '.gitmodules')):
            return
        call_subprocess(
            [self.cmd, 'submodule', 'update', '--init', '--recursive', '-q'],
            cwd=location,
        )


vcs.register(Git)
apache-2.0
seann1/portfolio5
.meteor/dev_bundle/lib/node_modules/npm/node_modules/node-gyp/gyp/tools/pretty_sln.py
1831
5099
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# NOTE: Python 2 script (print statements throughout).

"""Prints the information in a sln file in a diffable way.

   It first outputs each projects in alphabetical order with their
   dependencies.

   Then it outputs a possible build order.
"""

__author__ = 'nsylvain (Nicolas Sylvain)'

import os
import re
import sys
import pretty_vcproj


def BuildProject(project, built, projects, deps):
  # if all dependencies are done, we can build it, otherwise we try to build the
  # dependency.
  # This is not infinite-recursion proof.
  for dep in deps[project]:
    if dep not in built:
      BuildProject(dep, built, projects, deps)
  print project
  built.append(project)


def ParseSolution(solution_file):
  # Parse a Visual Studio .sln file and return
  # (projects: name -> [path, clsid, raw path], dependencies: name -> [names]).

  # All projects, their clsid and paths.
  projects = dict()

  # A list of dependencies associated with a project.
  dependencies = dict()

  # Regular expressions that matches the SLN format.
  # The first line of a project definition.
  # (The GUID is the fixed VS C++ project-type id.)
  begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
                             r'}"\) = "(.*)", "(.*)", "(.*)"$')
  # The last line of a project definition.
  end_project = re.compile('^EndProject$')
  # The first line of a dependency list.
  begin_dep = re.compile(
      r'ProjectSection\(ProjectDependencies\) = postProject$')
  # The last line of a dependency list.
  end_dep = re.compile('EndProjectSection$')
  # A line describing a dependency.
  dep_line = re.compile(' *({.*}) = ({.*})$')

  in_deps = False
  solution = open(solution_file)
  for line in solution:
    results = begin_project.search(line)
    if results:
      # Hack to remove icu because the diff is too different.
      if results.group(1).find('icu') != -1:
        continue
      # We remove "_gyp" from the names because it helps to diff them.
      current_project = results.group(1).replace('_gyp', '')
      projects[current_project] = [results.group(2).replace('_gyp', ''),
                                   results.group(3),
                                   results.group(2)]
      dependencies[current_project] = []
      continue

    results = end_project.search(line)
    if results:
      current_project = None
      continue

    results = begin_dep.search(line)
    if results:
      in_deps = True
      continue

    results = end_dep.search(line)
    if results:
      in_deps = False
      continue

    results = dep_line.search(line)
    if results and in_deps and current_project:
      dependencies[current_project].append(results.group(1))
      continue

  # Change all dependencies clsid to name instead.
  for project in dependencies:
    # For each dependencies in this project
    new_dep_array = []
    for dep in dependencies[project]:
      # Look for the project name matching this clsid
      for project_info in projects:
        if projects[project_info][1] == dep:
          new_dep_array.append(project_info)
    dependencies[project] = sorted(new_dep_array)

  return (projects, dependencies)


def PrintDependencies(projects, deps):
  # Alphabetical listing of each project, its path and its dependencies.
  print "---------------------------------------"
  print "Dependencies for all projects"
  print "---------------------------------------"
  print "--                                   --"

  for (project, dep_list) in sorted(deps.items()):
    print "Project : %s" % project
    print "Path : %s" % projects[project][0]
    if dep_list:
      for dep in dep_list:
        print "  - %s" % dep
    print ""

  print "--                                   --"


def PrintBuildOrder(projects, deps):
  # Emit one dependency-respecting build order (depth-first via BuildProject).
  print "---------------------------------------"
  print "Build order                            "
  print "---------------------------------------"
  print "--                                   --"

  built = []
  for (project, _) in sorted(deps.items()):
    if project not in built:
      BuildProject(project, built, projects, deps)

  print "--                                   --"


def PrintVCProj(projects):
  # Pretty-print every project's .vcproj via the pretty_vcproj helper.
  for project in projects:
    print "-------------------------------------"
    print "-------------------------------------"
    print project
    print project
    print project
    print "-------------------------------------"
    print "-------------------------------------"

    project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
                                                projects[project][2]))

    pretty = pretty_vcproj
    argv = [ '',
             project_path,
             '$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
           ]
    argv.extend(sys.argv[3:])
    pretty.main(argv)


def main():
  # check if we have exactly 1 parameter.
  if len(sys.argv) < 2:
    print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
    return 1

  (projects, deps) = ParseSolution(sys.argv[1])
  PrintDependencies(projects, deps)
  PrintBuildOrder(projects, deps)

  if '--recursive' in sys.argv:
    PrintVCProj(projects)
  return 0


if __name__ == '__main__':
  sys.exit(main())
gpl-2.0
cloudbees/changelog-client-python
ccp/client.py
1
2319
# -*- coding: utf-8 -*- """ ccp.client ~~~~~~~~~~~~ This module implements the Changelog API. :license: MIT, see LICENSE for more details. """ import sys import requests import json from time import time import logging from pkg_resources import get_distribution API_HOST = "localhost" API_PORT = 5000 SEVERITY = dict(INFO=1, NOTIFICATION=2, WARNING=3, ERROR=4, CRITICAL=5) class Client(object): def __init__(self, host=API_HOST, port=API_PORT): self.host = host self.port = port self.endpoint = "/api/events" self.logger = logging.getLogger('changelog_client') def deflate_severity(self, severity): if isinstance(severity, int): return severity return SEVERITY[severity] def send(self, message, severity, category="misc", extra_headers=None, customer=None, environment=None): headers = { "User-Agent": "ccp/client v.%s" % get_distribution("ccp").version } url = self.get_url() self.logger.info('Sending changelog event to %s' % url) headers["Content-Type"] = "application/json" if extra_headers is not None: headers.update(extra_headers) data = { "criticality": "%d" % self.deflate_severity(severity), "unix_timestamp": "%d" % time(), "category": category, "description": message } if customer: data['customer'] = customer if environment: data['environment'] = environment try: response = requests.post( url, headers=headers, data=json.dumps(data)) if "OK" in response.text: return True else: self.logger.error( "Failed to send changelog message to server: %s" % response.text) except Exception: exc_info = sys.exc_info() self.logger.exception( "Failed to send changelog message to server") raise exc_info[1], None, exc_info[2] def get_url(self): port = "" if self.port == 80 else ":%d" % self.port protocol = "https://" if self.port == 443 else "http://" base_full_url = "%s%s%s%s" % (protocol, self.host, port, self.endpoint) return base_full_url
mit
erikcas/android_kernel_sony_msm
tools/perf/scripts/python/syscall-counts.py
11181
1522
# system call counts # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide system call totals, broken down by syscall. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import syscall_name usage = "perf script -s syscall-counts.py [comm]\n"; for_comm = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_syscall_totals() def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(): if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "-----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): print "%-40s %10d\n" % (syscall_name(id), val),
gpl-2.0
vathpela/blivet
blivet/dbus/constants.py
3
1732
# # Copyright (C) 2016 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Lehman <dlehman@redhat.com> # BUS_NAME = "com.redhat.Blivet1" BASE_OBJECT_PATH = "/com/redhat/Blivet1" BLIVET_INTERFACE = "%s.Blivet" % BUS_NAME BLIVET_OBJECT_PATH = "%s/Blivet" % BASE_OBJECT_PATH DEVICE_INTERFACE = "%s.Device" % BUS_NAME DEVICE_OBJECT_PATH_BASE = "%s/Devices" % BASE_OBJECT_PATH DEVICE_REMOVED_OBJECT_PATH_BASE = "%s/RemovedDevices" % BASE_OBJECT_PATH FORMAT_INTERFACE = "%s.Format" % BUS_NAME FORMAT_OBJECT_PATH_BASE = "%s/Formats" % BASE_OBJECT_PATH FORMAT_REMOVED_OBJECT_PATH_BASE = "%s/RemovedFormats" % BASE_OBJECT_PATH ACTION_INTERFACE = "%s.Action" % BUS_NAME ACTION_OBJECT_PATH_BASE = "%s/Actions" % BASE_OBJECT_PATH OBJECT_MANAGER_PATH = BASE_OBJECT_PATH OBJECT_MANAGER_INTERFACE = "org.freedesktop.DBus.ObjectManager"
gpl-2.0
DevangS/CoralNet
annotations/forms.py
1
15430
from decimal import Decimal from exceptions import ValueError from itertools import chain from django.core.exceptions import ValidationError, MultipleObjectsReturned from django.core.urlresolvers import reverse from django.forms import Form from django.forms.fields import BooleanField, CharField, DecimalField, IntegerField from django.forms.widgets import TextInput, HiddenInput from django.utils import simplejson from django.utils.html import conditional_escape from django.utils.safestring import mark_safe from django.utils.encoding import force_unicode from django import forms from django.forms.models import ModelForm from accounts.utils import is_robot_user, get_robot_user from annotations.model_utils import AnnotationAreaUtils from annotations.models import Label, LabelSet, Annotation, AnnotationToolSettings from django.core.mail import mail_admins from CoralNet.forms import FormHelper # Custom widget to enable multiple checkboxes without outputting a wrongful # helptext since I'm modifying the widget used to display labels. # This is a workaround for a bug in Django which associates helptext # with the view instead of with the widget being used. from images.models import Point, Source, Metadata class CustomCheckboxSelectMultiple(forms.CheckboxSelectMultiple): items_per_row = 4 # Number of items per row def render(self, name, value, attrs=None, choices=()): if value is None: value = [] has_id = attrs and 'id' in attrs final_attrs = self.build_attrs(attrs, name=name) output = ['<table><tr>'] # Normalize to strings str_values = set([force_unicode(v) for v in value]) for i, (option_value, option_label) in enumerate(chain(self.choices, choices)): # If an ID attribute was given, add a numeric index as a suffix, # so that the checkboxes don't all have the same ID attribute. 
if has_id: final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i)) label_for = ' for="%s"' % final_attrs['id'] else: label_for = '' cb = forms.CheckboxInput(final_attrs, check_test=lambda value: value in str_values) option_value = force_unicode(option_value) rendered_cb = cb.render(name, option_value) option_label = conditional_escape(force_unicode(option_label)) if i != 0 and i % self.items_per_row == 0: output.append('</tr><tr>') output.append('<td nowrap><label%s>%s %s</label></td>' % (label_for, rendered_cb, option_label)) output.append('</tr></table>') return mark_safe('\n'.join(output)) class NewLabelForm(ModelForm): class Meta: model = Label widgets = { 'code': TextInput(attrs={'size': 10}), } def clean(self): """ 1. Strip spaces from character fields. 2. Add an error if the specified name or code matches that of an existing label. 3. Call the parent's clean() to finish up with the default behavior. """ data = FormHelper.stripSpacesFromFields( self.cleaned_data, self.fields) if data.has_key('name'): labelsOfSameName = Label.objects.filter(name__iexact=data['name']) if len(labelsOfSameName) > 0: # mark_safe(), along with the |safe template filter, allows HTML in the message. 
msg = mark_safe('There is already a label with the name %s: <a href="%s" target="_blank">%s</a>' % ( data['name'], reverse('label_main', args=[labelsOfSameName[0].id]), labelsOfSameName[0].name, )) self._errors['name'] = self.error_class([msg]) if data.has_key('code'): labelsOfSameCode = Label.objects.filter(code__iexact=data['code']) if len(labelsOfSameCode) > 0: msg = mark_safe('There is already a label with the short code %s: <a href="%s" target="_blank">%s</a>' % ( data['code'], reverse('label_main', args=[labelsOfSameCode[0].id]), labelsOfSameCode[0].name, )) self._errors['code'] = self.error_class([msg]) self.cleaned_data = data return super(NewLabelForm, self).clean() class NewLabelSetForm(ModelForm): def __init__(self, *args, **kwargs): super(NewLabelSetForm, self).__init__(*args, **kwargs) # Put the label choices in order self.fields['labels'].choices = \ [(label.id, label) for label in Label.objects.all().order_by('group__id', 'name')] # Custom widget for label selection self.fields['labels'].widget = CustomCheckboxSelectMultiple( choices=self.fields['labels'].choices) # Removing "Hold down "Control", or "Command" on a Mac, to select more than one." self.fields['labels'].help_text = '' class Meta: model = LabelSet # description and location are obsolete now that there's a 1-to-1 # correspondence between labelsets and sources. 
exclude = ('description', 'location') class Media: js = ( # From this app's static folder "js/LabelsetFormHelper.js", ) class AnnotationForm(forms.Form): def __init__(self, *args, **kwargs): image = kwargs.pop('image') user = kwargs.pop('user') show_machine_annotations = kwargs.pop('show_machine_annotations') super(AnnotationForm, self).__init__(*args, **kwargs) self.fields['image_id'] = CharField( widget=HiddenInput(), initial=str(image.id), ) self.fields['user_id'] = CharField( widget=HiddenInput(), initial=str(user.id), ) labelFieldMaxLength = Label._meta.get_field('code').max_length for point in Point.objects.filter(image=image).order_by('point_number'): try: if show_machine_annotations: existingAnnotation = Annotation.objects.get(point=point) else: existingAnnotation = Annotation.objects.exclude(user=get_robot_user()).get(point=point) except Annotation.DoesNotExist: existingAnnotation = None except MultipleObjectsReturned: existingAnnotation = None mail_admins('Multiple annotations returned for a point object', 'Multiple annotations returned for query: Annotations.objects.get(point=point) for Imageid:' + str(image.id) + ', pointid:' + str(point.id) + '. Please investigate.') if existingAnnotation: existingAnnoCode = existingAnnotation.label.code isRobotAnnotation = is_robot_user(existingAnnotation.user) else: existingAnnoCode = '' isRobotAnnotation = None pointNum = point.point_number # Create the text field for annotating a point with a label code. # label_1 for point 1, label_23 for point 23, etc. labelFieldName = 'label_' + str(pointNum) self.fields[labelFieldName] = CharField( widget=TextInput(attrs=dict( size=6, readonly='', )), max_length=labelFieldMaxLength, label=str(pointNum), required=False, initial=existingAnnoCode, ) # Create a hidden field to indicate whether a point is robot-annotated or not. # robot_1 for point 1, robot_23 for point 23, etc. 
robotFieldName = 'robot_' + str(pointNum) self.fields[robotFieldName] = BooleanField( widget=HiddenInput(), required=False, initial=simplejson.dumps(isRobotAnnotation), ) class AnnotationToolSettingsForm(ModelForm): class Meta: model = AnnotationToolSettings class Media: js = ('jscolor/jscolor.js', 'js/AnnotationToolSettingsHelper.js',) def __init__(self, *args, **kwargs): super(AnnotationToolSettingsForm, self).__init__(*args, **kwargs) # Make text fields have the appropriate size. self.fields['point_marker_size'].widget.attrs.update({'size': 2}) self.fields['point_number_size'].widget.attrs.update({'size': 2}) # Make the color fields have class="color" so they use jscolor. color_fields = [self.fields[name] for name in ['unannotated_point_color', 'robot_annotated_point_color', 'human_annotated_point_color', 'selected_point_color',] ] for field in color_fields: field.widget.attrs.update({'class': 'color'}) class AnnotationToolNavHistoryForm(Form): back = forms.fields.CharField( widget=forms.HiddenInput(), ) forward = forms.fields.CharField( widget=forms.HiddenInput(), ) from_image_id = forms.fields.IntegerField( widget=forms.HiddenInput() ) class AnnotationImageOptionsForm(Form): class Media: js = ('js/AnnotationToolImageHelper.js',) brightness = IntegerField(initial='0', widget=TextInput(attrs={'size': 3})) contrast = DecimalField(initial='0', widget=TextInput(attrs={'size': 3})) class AnnotationAreaPercentsForm(Form): min_x = DecimalField(label="Left boundary X", required=True, min_value=Decimal(0), max_value=Decimal(100), decimal_places=3, widget=TextInput(attrs={'size': 3})) max_x = DecimalField(label="Right boundary X", required=True, min_value=Decimal(0), max_value=Decimal(100), decimal_places=3, widget=TextInput(attrs={'size': 3})) min_y = DecimalField(label="Top boundary Y", required=True, min_value=Decimal(0), max_value=Decimal(100), decimal_places=3, widget=TextInput(attrs={'size': 3})) max_y = DecimalField(label="Bottom boundary Y", required=True, 
min_value=Decimal(0), max_value=Decimal(100), decimal_places=3, widget=TextInput(attrs={'size': 3})) def __init__(self, *args, **kwargs): """ If a Source is passed in as an argument, then get the annotation area of that Source, and use that to fill the form fields' initial values. """ if kwargs.has_key('source'): source = kwargs.pop('source') if source.image_annotation_area: kwargs['initial'] = AnnotationAreaUtils.db_format_to_percentages(source.image_annotation_area) self.form_help_text = Source._meta.get_field('image_annotation_area').help_text super(AnnotationAreaPercentsForm, self).__init__(*args, **kwargs) def clean(self): data = self.cleaned_data if 'min_x' in data and 'max_x' in data: if data['min_x'] >= data['max_x']: self._errors['max_x'] = self.error_class(["The right boundary x must be greater than the left boundary x."]) del data['min_x'] del data['max_x'] if 'min_y' in data and 'max_y' in data: if data['min_y'] >= data['max_y']: self._errors['max_y'] = self.error_class(["The bottom boundary y must be greater than the top boundary y."]) del data['min_y'] del data['max_y'] self.cleaned_data = data return super(AnnotationAreaPercentsForm, self).clean() class AnnotationAreaPixelsForm(Form): class Media: js = ("js/AnnotationAreaEditHelper.js",) css = { 'all': ("css/annotation_area_edit.css",) } # The complete field definitions are in __init__(), because # max_value needs to be set dynamically. # (We *could* just append the max-value validators dynamically, except # that results in some really weird behavior where the error list grows # with duplicate errors every time you press submit.) 
min_x = IntegerField() max_x = IntegerField() min_y = IntegerField() max_y = IntegerField() def __init__(self, *args, **kwargs): image = kwargs.pop('image') if image.metadata.annotation_area: d = AnnotationAreaUtils.db_format_to_numbers(image.metadata.annotation_area) annoarea_type = d.pop('type') if annoarea_type == AnnotationAreaUtils.TYPE_PERCENTAGES: kwargs['initial'] = AnnotationAreaUtils.percentages_to_pixels(width=image.original_width, height=image.original_height, **d) elif annoarea_type == AnnotationAreaUtils.TYPE_PIXELS: kwargs['initial'] = d elif annoarea_type == AnnotationAreaUtils.TYPE_IMPORTED: raise ValueError("Points were imported; annotation area should be un-editable.") super(AnnotationAreaPixelsForm, self).__init__(*args, **kwargs) self.fields['min_x'] = IntegerField( label="Left boundary X", required=False, min_value=1, max_value=image.original_width, widget=TextInput(attrs={'size': 3}) ) self.fields['max_x'] = IntegerField( label="Right boundary X", required=False, min_value=1, max_value=image.original_width, widget=TextInput(attrs={'size': 3}) ) self.fields['min_y'] = IntegerField( label="Top boundary Y", required=False, min_value=1, max_value=image.original_height, widget=TextInput(attrs={'size': 3}) ) self.fields['max_y'] = IntegerField( label="Bottom boundary Y", required=False, min_value=1, max_value=image.original_height, widget=TextInput(attrs={'size': 3}) ) self.form_help_text = Metadata._meta.get_field('annotation_area').help_text def clean(self): data = self.cleaned_data field_names = ['min_x', 'max_x', 'min_y', 'max_y'] no_errors_yet = len(filter(lambda key: key not in data, field_names)) == 0 if no_errors_yet: has_empty_fields = len(filter(lambda key: data[key] is None, field_names)) > 0 all_empty_fields = len(filter(lambda key: data[key] is not None, field_names)) == 0 if has_empty_fields and not all_empty_fields: raise ValidationError("You must fill in all four of the annotation area fields.") if 'min_x' in data and 'max_x' in 
data: if data['min_x'] > data['max_x']: self._errors['max_x'] = self.error_class(["The right boundary x must be greater than or equal to the left boundary x."]) del data['min_x'] del data['max_x'] if 'min_y' in data and 'max_y' in data: if data['min_y'] > data['max_y']: self._errors['max_y'] = self.error_class(["The bottom boundary y must be greater than or equal to the top boundary y."]) del data['min_y'] del data['max_y'] self.cleaned_data = data return super(AnnotationAreaPixelsForm, self).clean()
bsd-2-clause
zero323/spark
python/pyspark/streaming/util.py
23
5614
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import time from datetime import datetime import traceback import sys from py4j.java_gateway import is_instance_of from pyspark import SparkContext, RDD class TransformFunction(object): """ This class wraps a function RDD[X] -> RDD[Y] that was passed to DStream.transform(), allowing it to be called from Java via Py4J's callback server. Java calls this function with a sequence of JavaRDDs and this function returns a single JavaRDD pointer back to Java. 
""" _emptyRDD = None def __init__(self, ctx, func, *deserializers): self.ctx = ctx self.func = func self.deserializers = deserializers self.rdd_wrap_func = lambda jrdd, ctx, ser: RDD(jrdd, ctx, ser) self.failure = None def rdd_wrapper(self, func): self.rdd_wrap_func = func return self def call(self, milliseconds, jrdds): # Clear the failure self.failure = None try: if self.ctx is None: self.ctx = SparkContext._active_spark_context if not self.ctx or not self.ctx._jsc: # stopped return # extend deserializers with the first one sers = self.deserializers if len(sers) < len(jrdds): sers += (sers[0],) * (len(jrdds) - len(sers)) rdds = [self.rdd_wrap_func(jrdd, self.ctx, ser) if jrdd else None for jrdd, ser in zip(jrdds, sers)] t = datetime.fromtimestamp(milliseconds / 1000.0) r = self.func(t, *rdds) if r: # Here, we work around to ensure `_jrdd` is `JavaRDD` by wrapping it by `map`. # org.apache.spark.streaming.api.python.PythonTransformFunction requires to return # `JavaRDD`; however, this could be `JavaPairRDD` by some APIs, for example, `zip`. # See SPARK-17756. if is_instance_of(self.ctx._gateway, r._jrdd, "org.apache.spark.api.java.JavaRDD"): return r._jrdd else: return r.map(lambda x: x)._jrdd except: self.failure = traceback.format_exc() def getLastFailure(self): return self.failure def __repr__(self): return "TransformFunction(%s)" % self.func class Java: implements = ['org.apache.spark.streaming.api.python.PythonTransformFunction'] class TransformFunctionSerializer(object): """ This class implements a serializer for PythonTransformFunction Java objects. This is necessary because the Java PythonTransformFunction objects are actually Py4J references to Python objects and thus are not directly serializable. When Java needs to serialize a PythonTransformFunction, it uses this class to invoke Python, which returns the serialized function as a byte array. 
""" def __init__(self, ctx, serializer, gateway=None): self.ctx = ctx self.serializer = serializer self.gateway = gateway or self.ctx._gateway self.gateway.jvm.PythonDStream.registerSerializer(self) self.failure = None def dumps(self, id): # Clear the failure self.failure = None try: func = self.gateway.gateway_property.pool[id] return bytearray(self.serializer.dumps(( func.func, func.rdd_wrap_func, func.deserializers))) except: self.failure = traceback.format_exc() def loads(self, data): # Clear the failure self.failure = None try: f, wrap_func, deserializers = self.serializer.loads(bytes(data)) return TransformFunction(self.ctx, f, *deserializers).rdd_wrapper(wrap_func) except: self.failure = traceback.format_exc() def getLastFailure(self): return self.failure def __repr__(self): return "TransformFunctionSerializer(%s)" % self.serializer class Java: implements = ['org.apache.spark.streaming.api.python.PythonTransformFunctionSerializer'] def rddToFileName(prefix, suffix, timestamp): """ Return string prefix-time(.suffix) Examples -------- >>> rddToFileName("spark", None, 12345678910) 'spark-12345678910' >>> rddToFileName("spark", "tmp", 12345678910) 'spark-12345678910.tmp' """ if isinstance(timestamp, datetime): seconds = time.mktime(timestamp.timetuple()) timestamp = int(seconds * 1000) + timestamp.microsecond // 1000 if suffix is None: return prefix + "-" + str(timestamp) else: return prefix + "-" + str(timestamp) + "." + suffix if __name__ == "__main__": import doctest (failure_count, test_count) = doctest.testmod() if failure_count: sys.exit(-1)
apache-2.0
childsish/lhc-python
lhc/binf/genomic_coordinate/genomic_position.py
1
2868
from functools import total_ordering from lhc.order import natural_key class ChromosomeIdentifier: def __init__(self, chromosome: str): self.chromosome = chromosome self.parts = tuple(natural_key(chromosome)) def __str__(self): return self.chromosome def __hash__(self): return hash(self.chromosome) def __eq__(self, other): if isinstance(other, str): return self.chromosome == other return self.parts == other.parts def __lt__(self, other): if isinstance(other, str): return self.chromosome < other return self.parts < other.parts @total_ordering class GenomicPosition: def __init__(self, chromosome, position, *, strand='+', data=None): self.chromosome = chromosome if isinstance(chromosome, ChromosomeIdentifier) else ChromosomeIdentifier(chromosome) self.position = position self.strand = strand self.data = data def __str__(self): return '{}:{}:{}'.format(self.chromosome, self.strand, self.position + 1) def __repr__(self): return 'GenomicPosition({})'.format(self) def __hash__(self): return hash((self.chromosome, self.position)) def __eq__(self, other): if isinstance(other, int): return self.position == other return self.chromosome == other.chromosome and self.position == other.position and\ self.strand == other.strand def __lt__(self, other): if isinstance(other, int): return self.position < other return (self.chromosome < other.chromosome) or\ (self.chromosome == other.chromosome) and (self.position < other.position) def __add__(self, other): """ Add an integer to the current position :param int other: integer to add :return: new position :rtype: GenomicPosition """ return GenomicPosition(self.chromosome, self.position + other, strand=self.strand) def __sub__(self, other): """ Subtract either an integer from the current position :param int other: integer to subtract :return: new position :rtype: GenomicPosition """ if isinstance(other, GenomicPosition): return self.get_distance_to(other) return GenomicPosition(self.chromosome, self.position - other, strand=self.strand) 
def get_distance_to(self, other): """ Get the distance between two positions :param GenomicPosition other: other position :return: distance between positions :rtype: int """ if self.chromosome != other.chromosome: raise ValueError('Positions on different chromosomes: "{}" and "{}"'.format(self.chromosome, other.chromosome)) return self.position - other.position
gpl-2.0
pychess/pychess
lib/pychess/perspectives/games/historyPanel.py
2
9491
from gi.repository import Gtk, Gdk

from pychess.System import conf
from pychess.System.prefix import addDataPrefix
from pychess.Utils.const import BLACK
from pychess.Utils.Move import toSAN, toFAN
from pychess.widgets.Background import hexcol

__title__ = _("Move History")

__active__ = True

__icon__ = addDataPrefix("glade/panel_moves.svg")

__desc__ = _(
    "The moves sheet keeps track of the players' moves and lets you navigate through the game history")


class Sidepanel:
    """Move-list side panel: a 3-column TreeView (move number, white, black)
    kept in sync with the game model via connected signals.

    ListStore layout (see load()): mvcount, white SAN/FAN, black SAN/FAN,
    row number, white-cell background colour, black-cell background colour.
    """

    def load(self, gmwidg):
        """Build the widget tree, connect model/board/conf signals and return
        the scrolled window to be embedded in the game widget."""
        self.gamemodel = gmwidg.board.view.model
        # Keep the handler ids so on_game_terminated() can disconnect them.
        self.model_cids = [
            self.gamemodel.connect_after("game_changed", self.game_changed),
            self.gamemodel.connect_after("game_started", self.game_started),
            self.gamemodel.connect_after("moves_undone", self.moves_undone),
            self.gamemodel.connect_after("game_terminated", self.on_game_terminated),
        ]
        self.tv = Gtk.TreeView()
        self.tv.set_headers_visible(False)
        self.tv.set_grid_lines(True)
        self.tv.set_activate_on_single_click(True)
        # Selection is drawn manually via cell background colours instead.
        self.tv.get_selection().set_mode(Gtk.SelectionMode.NONE)

        # (row, column) of the currently highlighted cell; (None, None) = none.
        self.activated_cell = (None, None)

        def is_row_separator(treemodel, treeiter):
            # The initial dummy row appended by add_move() carries row == 0
            # and is rendered as a separator line.
            mvcount, wmove, bmove, row, wbg, bbg = self.store[treeiter]
            return row == 0

        self.tv.set_row_separator_func(is_row_separator)

        self.tv.connect("style-updated", self.on_style_updated)

        movetext_font = conf.get("movetextFont")

        # Column 0: move number.
        renderer = Gtk.CellRendererText()
        column = Gtk.TreeViewColumn("mvcount", renderer, text=0)
        column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
        self.tv.append_column(column)

        # Column 1: white's move; store column 4 supplies the background.
        self.white_renderer = Gtk.CellRendererText()
        self.white_renderer.set_property("xalign", 1)
        self.white_renderer.set_property("font", movetext_font)

        self.white_column = Gtk.TreeViewColumn("white", self.white_renderer, text=1, background=4)
        self.white_column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
        self.tv.append_column(self.white_column)

        # Column 2: black's move; store column 5 supplies the background.
        self.black_renderer = Gtk.CellRendererText()
        self.black_renderer.set_property("xalign", 1)
        self.black_renderer.set_property("font", movetext_font)

        self.black_column = Gtk.TreeViewColumn("black", self.black_renderer, text=2, background=5)
        self.black_column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
        self.tv.append_column(self.black_column)

        # To prevent black moves column expand to the right we add a dummy column finally
        renderer = Gtk.CellRendererText()
        column = Gtk.TreeViewColumn("dummy", renderer)
        self.tv.append_column(column)

        scrollwin = Gtk.ScrolledWindow()
        scrollwin.add(self.tv)

        # Our liststore elements will be:
        # mvcount, white move, black move, row number, white move background, black move background
        self.store = Gtk.ListStore(str, str, str, int, str, str)
        self.tv.set_model(self.store)
        self.tv_cid = self.tv.connect('row_activated', self.on_row_activated)
        self.boardview = gmwidg.board.view
        self.cid = self.boardview.connect("shownChanged", self.shownChanged)

        scrollwin.show_all()

        self.figuresInNotation = conf.get("figuresInNotation")

        def figuresInNotationCallback(none):
            # Re-render every stored move in SAN or figurine notation when the
            # preference flips.
            game = self.boardview.model
            if game.lesson_game:
                return
            self.figuresInNotation = conf.get("figuresInNotation")

            for i, move in enumerate(game.moves):
                board = game.variations[0][i]
                ply = game.lowply + i + 1
                if conf.get("figuresInNotation"):
                    notat = toFAN(board, move)
                else:
                    notat = toSAN(board, move, True)
                row, column = self.ply_to_row_col(ply)
                col = 2 if column == self.black_column else 1
                treeiter = self.store.get_iter(Gtk.TreePath(row))
                self.store.set_value(treeiter, col, notat)

        def font_changed(none):
            # Apply the new move-text font and refresh the highlighted cell.
            movetext_font = conf.get("movetextFont")
            self.black_renderer.set_property("font", movetext_font)
            self.white_renderer.set_property("font", movetext_font)
            self.shownChanged(self.boardview, self.boardview.shown)

        # Conf-notification ids, removed again in on_game_terminated().
        self.cids_conf = []
        self.cids_conf.append(conf.notify_add("movetextFont", font_changed))
        self.cids_conf.append(conf.notify_add("figuresInNotation", figuresInNotationCallback))

        return scrollwin

    def get_background_rgba(self, selected=False):
        """Return the theme's selected/base background colour as a hex string."""
        if selected:
            found, color = self.tv.get_style_context().lookup_color("theme_selected_bg_color")
        else:
            found, color = self.tv.get_style_context().lookup_color("theme_base_color")
        return hexcol(Gdk.RGBA(color.red, color.green, color.blue, 1))

    def on_style_updated(self, widget):
        """Repaint all cell backgrounds after a theme/style change."""
        for row in self.store:
            row[4] = self.get_background_rgba()
            row[5] = self.get_background_rgba()
        # update selected cell
        self.shownChanged(self.boardview, self.boardview.shown)

    def on_game_terminated(self, model):
        """Disconnect every signal/conf handler registered in load()."""
        self.tv.disconnect(self.tv_cid)
        for cid in self.model_cids:
            self.gamemodel.disconnect(cid)
        self.boardview.disconnect(self.cid)
        for cid in self.cids_conf:
            conf.notify_remove(cid)

    def on_row_activated(self, tv, path, col, from_show_changed=False):
        """Highlight the clicked move cell and show the matching board."""
        if col not in (self.white_column, self.black_column):
            return

        # Make previous activated cell background color unselected
        old_row, old_col = self.activated_cell
        if old_row is not None:
            bg_col = 5 if old_col == self.black_column else 4
            treeiter = self.store.get_iter(Gtk.TreePath(old_row))
            self.store.set_value(treeiter, bg_col, self.get_background_rgba(selected=False))

        # Make activated cell background color selected
        self.activated_cell = path[0], col
        bg_col = 5 if col == self.black_column else 4
        treeiter = self.store.get_iter(Gtk.TreePath(path[0]))
        self.store.set_value(treeiter, bg_col, self.get_background_rgba(selected=True))

        # Map (row, colour-column) back to a board index in gamemodel.boards.
        index = path[0] * 2 - 1 + (1 if col == self.black_column else 0)
        if self.gamemodel.starting_color == BLACK:
            index -= 1
        if index < len(self.gamemodel.boards):
            # Don't set shown board if on_row_activated() was called from shownChanged()
            if not from_show_changed:
                board = self.gamemodel.boards[index]
                self.boardview.setShownBoard(board)

    def shownChanged(self, boardview, shown):
        """Follow the board view: highlight and scroll to the shown ply."""
        if boardview is None or self.gamemodel is None:
            return
        if not boardview.shownIsMainLine():
            return

        row, column = self.ply_to_row_col(shown)

        try:
            self.on_row_activated(self, Gtk.TreePath(row), column, from_show_changed=True)
            self.tv.scroll_to_cell(row)
        except ValueError:
            pass  # deleted variations by moves_undoing

    def moves_undone(self, gamemodel, moves):
        """Remove the last `moves` half-moves from the sheet."""
        for i in range(moves):
            treeiter = self.store.get_iter((len(self.store) - 1, ))
            # If latest move is black move don't remove whole line!
            if self.store[-1][2]:
                self.store.set_value(treeiter, 2, "")
            else:
                self.store.remove(treeiter)

    def game_changed(self, gamemodel, ply):
        """Append new move(s); on first call back-fill from lowply to ply."""
        if self.boardview is None or self.boardview.model is None:
            return
        if len(self.store) == 0:
            for i in range(len(self.store) + gamemodel.lowply, ply + 1):
                self.add_move(gamemodel, i)
        else:
            self.add_move(gamemodel, ply)
        self.shownChanged(self.boardview, ply)

    def game_started(self, game):
        if game.lesson_game:
            return
        self.game_changed(game, game.ply)

    def add_move(self, gamemodel, ply):
        """Insert the move leading to `ply` into the store (creating the row
        if needed); ply == lowply appends the separator/dummy first row."""
        if ply == gamemodel.lowply:
            # Placeholder texts size the columns; row == 0 marks the separator.
            self.store.append(["%4s." % gamemodel.lowply, "1234567", "1234567", 0,
                               self.get_background_rgba(), self.get_background_rgba()])
            return

        if self.figuresInNotation:
            notat = toFAN(gamemodel.getBoardAtPly(ply - 1), gamemodel.getMoveAtPly(ply - 1))
        else:
            notat = toSAN(gamemodel.getBoardAtPly(ply - 1), gamemodel.getMoveAtPly(ply - 1), localRepr=True)

        row, column = self.ply_to_row_col(ply)

        if len(self.store) - 1 < row:
            mvcount = "%s." % ((ply + 1) // 2)
            if column == self.white_column:
                self.store.append([mvcount, notat, "", row, self.get_background_rgba(), self.get_background_rgba()])
            else:
                self.store.append([mvcount, "", notat, row, self.get_background_rgba(), self.get_background_rgba()])
        else:
            treeiter = self.store.get_iter(Gtk.TreePath(row))
            col = 1 if column == self.white_column else 2
            self.store.set_value(treeiter, col, notat)

    def ply_to_row_col(self, ply):
        """Map a ply to its (store row, Gtk column); odd plies are white's."""
        # Old-style conditional: odd ply -> white column, even -> black.
        col = ply & 1 and self.white_column or self.black_column
        if self.gamemodel.lowply & 1:
            row = (ply - self.gamemodel.lowply) // 2
        else:
            row = (ply - self.gamemodel.lowply - 1) // 2
        # +1 skips the separator row appended for lowply.
        return row + 1, col
gpl-3.0
knowsis/django
django/contrib/gis/geoip/base.py
110
11124
import os
import re
from ctypes import c_char_p

from django.core.validators import ipv4_re
from django.contrib.gis.geoip.libgeoip import GEOIP_SETTINGS
from django.contrib.gis.geoip.prototypes import (
    GeoIPRecord, GeoIPTag, GeoIP_open, GeoIP_delete,
    GeoIP_database_info, GeoIP_lib_version, GeoIP_record_by_addr,
    GeoIP_record_by_name, GeoIP_country_code_by_addr,
    GeoIP_country_code_by_name, GeoIP_country_name_by_addr,
    GeoIP_country_name_by_name)

from django.utils import six
from django.utils.encoding import force_bytes

# Regular expressions for recognizing the GeoIP free database editions.
free_regex = re.compile(r'^GEO-\d{3}FREE')
lite_regex = re.compile(r'^GEO-\d{3}LITE')

#### GeoIP classes ####
class GeoIPException(Exception):
    # Raised for configuration problems (bad path/cache option) and for
    # queries against a database that was never opened.
    pass

class GeoIP(object):
    """ctypes-based wrapper around the legacy MaxMind GeoIP C library,
    exposing city/country lookups by IP address or FQDN."""

    # The flags for GeoIP memory caching.
    # GEOIP_STANDARD - read database from filesystem, uses least memory.
    #
    # GEOIP_MEMORY_CACHE - load database into memory, faster performance
    #        but uses more memory
    #
    # GEOIP_CHECK_CACHE - check for updated database.  If database has been
    #        updated, reload filehandle and/or memory cache.  This option
    #        is not thread safe.
    #
    # GEOIP_INDEX_CACHE - just cache the most frequently accessed index
    #        portion of the database, resulting in faster lookups than
    #        GEOIP_STANDARD, but less memory usage than GEOIP_MEMORY_CACHE -
    #        useful for larger databases such as GeoIP Organization and
    #        GeoIP City.  Note, for GeoIP Country, Region and Netspeed
    #        databases, GEOIP_INDEX_CACHE is equivalent to GEOIP_MEMORY_CACHE
    #
    # GEOIP_MMAP_CACHE - load database into mmap shared memory ( not available
    #       on Windows).
    GEOIP_STANDARD = 0
    GEOIP_MEMORY_CACHE = 1
    GEOIP_CHECK_CACHE = 2
    GEOIP_INDEX_CACHE = 4
    GEOIP_MMAP_CACHE = 8
    # Valid values for the `cache` constructor argument (values unused).
    cache_options = dict((opt, None) for opt in (0, 1, 2, 4, 8))

    # Paths to the city & country binary databases.
    _city_file = ''
    _country_file = ''

    # Initially, pointers to GeoIP file references are NULL.
    _city = None
    _country = None

    def __init__(self, path=None, cache=0, country=None, city=None):
        """
        Initializes the GeoIP object, no parameters are required to use default
        settings.  Keyword arguments may be passed in to customize the locations
        of the GeoIP data sets.

        * path: Base directory to where GeoIP data is located or the full path
            to where the city or country data files (*.dat) are located.
            Assumes that both the city and country data sets are located in
            this directory; overrides the GEOIP_PATH settings attribute.

        * cache: The cache settings when opening up the GeoIP datasets,
            and may be an integer in (0, 1, 2, 4, 8) corresponding to
            the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE,
            GEOIP_INDEX_CACHE, and GEOIP_MMAP_CACHE, `GeoIPOptions` C API
            settings,  respectively.  Defaults to 0, meaning that the data is
            read from the disk.

        * country: The name of the GeoIP country data file.  Defaults to
            'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute.

        * city: The name of the GeoIP city data file.  Defaults to
            'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute.
        """
        # Checking the given cache option.
        if cache in self.cache_options:
            self._cache = cache
        else:
            raise GeoIPException('Invalid GeoIP caching option: %s' % cache)

        # Getting the GeoIP data path.
        if not path:
            path = GEOIP_SETTINGS.get('GEOIP_PATH', None)
            if not path:
                raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
        if not isinstance(path, six.string_types):
            raise TypeError('Invalid path type: %s' % type(path).__name__)

        if os.path.isdir(path):
            # Constructing the GeoIP database filenames using the settings
            # dictionary.  If the database files for the GeoLite country
            # and/or city datasets exist, then try and open them.
            country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat'))
            if os.path.isfile(country_db):
                self._country = GeoIP_open(force_bytes(country_db), cache)
                self._country_file = country_db

            city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat'))
            if os.path.isfile(city_db):
                self._city = GeoIP_open(force_bytes(city_db), cache)
                self._city_file = city_db
        elif os.path.isfile(path):
            # Otherwise, some detective work will be needed to figure
            # out whether the given database path is for the GeoIP country
            # or city databases.
            ptr = GeoIP_open(force_bytes(path), cache)
            info = GeoIP_database_info(ptr)
            if lite_regex.match(info):
                # GeoLite City database detected.
                self._city = ptr
                self._city_file = path
            elif free_regex.match(info):
                # GeoIP Country database detected.
                self._country = ptr
                self._country_file = path
            else:
                raise GeoIPException('Unable to recognize database edition: %s' % info)
        else:
            raise GeoIPException('GeoIP path must be a valid file or directory.')

    def __del__(self):
        # Cleaning any GeoIP file handles lying around.
        # GeoIP_delete may already be None during interpreter shutdown.
        if GeoIP_delete is None:
            return
        if self._country:
            GeoIP_delete(self._country)
        if self._city:
            GeoIP_delete(self._city)

    def _check_query(self, query, country=False, city=False, city_or_country=False):
        "Helper routine for checking the query and database availability."
        # Making sure a string was passed in for the query.
        if not isinstance(query, six.string_types):
            raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)

        # Extra checks for the existence of country and city databases.
        if city_or_country and not (self._country or self._city):
            raise GeoIPException('Invalid GeoIP country and city data files.')
        elif country and not self._country:
            raise GeoIPException('Invalid GeoIP country data file: %s' % self._country_file)
        elif city and not self._city:
            raise GeoIPException('Invalid GeoIP city data file: %s' % self._city_file)

        # Return the query string back to the caller. GeoIP only takes
        # bytestrings.
        return force_bytes(query)

    def city(self, query):
        """
        Returns a dictionary of city information for the given IP address or
        Fully Qualified Domain Name (FQDN).  Some information in the dictionary
        may be undefined (None).
        """
        enc_query = self._check_query(query, city=True)
        if ipv4_re.match(query):
            # If an IP address was passed in
            return GeoIP_record_by_addr(self._city, c_char_p(enc_query))
        else:
            # If a FQDN was passed in.
            return GeoIP_record_by_name(self._city, c_char_p(enc_query))

    def country_code(self, query):
        "Returns the country code for the given IP Address or FQDN."
        enc_query = self._check_query(query, city_or_country=True)
        if self._country:
            if ipv4_re.match(query):
                return GeoIP_country_code_by_addr(self._country, enc_query)
            else:
                return GeoIP_country_code_by_name(self._country, enc_query)
        else:
            # No country database: fall back to the city record's field.
            return self.city(query)['country_code']

    def country_name(self, query):
        "Returns the country name for the given IP Address or FQDN."
        enc_query = self._check_query(query, city_or_country=True)
        if self._country:
            if ipv4_re.match(query):
                return GeoIP_country_name_by_addr(self._country, enc_query)
            else:
                return GeoIP_country_name_by_name(self._country, enc_query)
        else:
            # No country database: fall back to the city record's field.
            return self.city(query)['country_name']

    def country(self, query):
        """
        Returns a dictionary with the country code and name when given an
        IP address or a Fully Qualified Domain Name (FQDN).  For example, both
        '24.124.1.80' and 'djangoproject.com' are valid parameters.
        """
        # Returning the country code and name
        return {'country_code' : self.country_code(query),
                'country_name' : self.country_name(query),
                }

    #### Coordinate retrieval routines ####
    def coords(self, query, ordering=('longitude', 'latitude')):
        # Returns the requested coordinate fields of the city record, or None
        # when the lookup failed.
        cdict = self.city(query)
        if cdict is None:
            return None
        else:
            return tuple(cdict[o] for o in ordering)

    def lon_lat(self, query):
        "Returns a tuple of the (longitude, latitude) for the given query."
        return self.coords(query)

    def lat_lon(self, query):
        "Returns a tuple of the (latitude, longitude) for the given query."
        return self.coords(query, ('latitude', 'longitude'))

    def geos(self, query):
        "Returns a GEOS Point object for the given query."
        ll = self.lon_lat(query)
        if ll:
            # Imported lazily to avoid requiring GEOS for non-geometry use.
            from django.contrib.gis.geos import Point
            return Point(ll, srid=4326)
        else:
            return None

    #### GeoIP Database Information Routines ####
    @property
    def country_info(self):
        "Returns information about the GeoIP country database."
        if self._country is None:
            ci = 'No GeoIP Country data in "%s"' % self._country_file
        else:
            ci = GeoIP_database_info(self._country)
        return ci

    @property
    def city_info(self):
        "Returns information about the GeoIP city database."
        if self._city is None:
            ci = 'No GeoIP City data in "%s"' % self._city_file
        else:
            ci = GeoIP_database_info(self._city)
        return ci

    @property
    def info(self):
        "Returns information about the GeoIP library and databases in use."
        info = ''
        if GeoIP_lib_version:
            info += 'GeoIP Library:\n\t%s\n' % GeoIP_lib_version()
        return info + 'Country:\n\t%s\nCity:\n\t%s' % (self.country_info, self.city_info)

    #### Methods for compatibility w/the GeoIP-Python API. ####
    @classmethod
    def open(cls, full_path, cache):
        return GeoIP(full_path, cache)

    def _rec_by_arg(self, arg):
        # Prefer the richer city record; fall back to the country dict.
        if self._city:
            return self.city(arg)
        else:
            return self.country(arg)
    # Aliases matching the names of the GeoIP-Python API.
    region_by_addr = city
    region_by_name = city
    record_by_addr = _rec_by_arg
    record_by_name = _rec_by_arg
    country_code_by_addr = country_code
    country_code_by_name = country_code
    country_name_by_addr = country_name
    country_name_by_name = country_name
bsd-3-clause
Moriadry/tensorflow
tensorflow/contrib/keras/api/keras/applications/vgg16/__init__.py
57
1133
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """VGG16 Keras application.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.keras.python.keras.applications.vgg16 import decode_predictions from tensorflow.contrib.keras.python.keras.applications.vgg16 import preprocess_input from tensorflow.contrib.keras.python.keras.applications.vgg16 import VGG16 del absolute_import del division del print_function
apache-2.0
Zord13appdesa/python-for-android
python3-alpha/extra_modules/bs4/builder/_lxml.py
46
5603
__all__ = [
    'LXMLTreeBuilderForXML',
    'LXMLTreeBuilder',
    ]

import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
    FAST,
    HTML,
    HTMLTreeBuilder,
    PERMISSIVE,
    TreeBuilder,
    XML)
from bs4.dammit import UnicodeDammit

LXML = 'lxml'

class LXMLTreeBuilderForXML(TreeBuilder):
    """BeautifulSoup tree builder driving lxml's XMLParser in target mode:
    lxml calls start()/end()/data()/... on this object and the callbacks
    forward the events to the soup."""

    DEFAULT_PARSER_CLASS = etree.XMLParser

    is_xml = True

    # Well, it's permissive by XML parser standards.
    features = [LXML, XML, FAST, PERMISSIVE]

    @property
    def default_parser(self):
        # This can either return a parser object or a class, which
        # will be instantiated with default arguments.
        return etree.XMLParser(target=self, strip_cdata=False, recover=True)

    def __init__(self, parser=None, empty_element_tags=None):
        if empty_element_tags is not None:
            self.empty_element_tags = set(empty_element_tags)
        if parser is None:
            # Use the default parser.
            parser = self.default_parser
        if isinstance(parser, collections.Callable):
            # Instantiate the parser with default arguments
            parser = parser(target=self, strip_cdata=False)
        self.parser = parser
        self.soup = None
        # Stack of inverted {url: prefix} namespace maps; None while no
        # namespaces are in play at all.
        self.nsmaps = None

    def _getNsTag(self, tag):
        # Split the namespace URL out of a fully-qualified lxml tag
        # name. Copied from lxml's src/lxml/sax.py.
        if tag[0] == '{':
            return tuple(tag[1:].split('}', 1))
        else:
            return (None, tag)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None):
        """
        :return: A 4-tuple (markup, original encoding, encoding declared
         within markup, whether any characters had to be replaced during
         decoding).
        """
        if isinstance(markup, str):
            # Already unicode: nothing to detect or convert.
            return markup, None, None, False

        try_encodings = [user_specified_encoding, document_declared_encoding]
        dammit = UnicodeDammit(markup, try_encodings, is_html=True)
        return (dammit.markup, dammit.original_encoding,
                dammit.declared_html_encoding,
                dammit.contains_replacement_characters)

    def feed(self, markup):
        self.parser.feed(markup)
        self.parser.close()

    def close(self):
        self.nsmaps = None

    # NOTE(review): mutable default `nsmap={}` is only safe because the
    # callback never mutates it; lxml supplies the real mapping.
    def start(self, name, attrs, nsmap={}):
        nsprefix = None
        # Invert each namespace map as it comes in.
        if len(nsmap) == 0 and self.nsmaps != None:
            # There are no new namespaces for this tag, but namespaces
            # are in play, so we need a separate tag stack to know
            # when they end.
            self.nsmaps.append(None)
        elif len(nsmap) > 0:
            # A new namespace mapping has come into play.
            if self.nsmaps is None:
                self.nsmaps = []
            inverted_nsmap = dict((value, key) for key, value in list(nsmap.items()))
            self.nsmaps.append(inverted_nsmap)
            # Also treat the namespace mapping as a set of attributes on the
            # tag, so we can recreate it later.
            attrs = attrs.copy()
            for prefix, namespace in list(nsmap.items()):
                attribute = NamespacedAttribute(
                    "xmlns", prefix, "http://www.w3.org/2000/xmlns/")
                attrs[attribute] = namespace
        namespace, name = self._getNsTag(name)
        if namespace is not None:
            # Resolve the prefix from the innermost enclosing mapping.
            for inverted_nsmap in reversed(self.nsmaps):
                if inverted_nsmap is not None and namespace in inverted_nsmap:
                    nsprefix = inverted_nsmap[namespace]
                    break
        self.soup.handle_starttag(name, namespace, nsprefix, attrs)

    def end(self, name):
        self.soup.endData()
        completed_tag = self.soup.tagStack[-1]
        namespace, name = self._getNsTag(name)
        nsprefix = None
        if namespace is not None:
            for inverted_nsmap in reversed(self.nsmaps):
                if inverted_nsmap is not None and namespace in inverted_nsmap:
                    nsprefix = inverted_nsmap[namespace]
                    break
        self.soup.handle_endtag(name, nsprefix)
        if self.nsmaps != None:
            # This tag, or one of its parents, introduced a namespace
            # mapping, so pop it off the stack.
            self.nsmaps.pop()
            if len(self.nsmaps) == 0:
                # Namespaces are no longer in play, so don't bother keeping
                # track of the namespace stack.
                self.nsmaps = None

    def pi(self, target, data):
        # Processing instructions are dropped.
        pass

    def data(self, content):
        self.soup.handle_data(content)

    def doctype(self, name, pubid, system):
        self.soup.endData()
        doctype = Doctype.for_name_and_ids(name, pubid, system)
        self.soup.object_was_parsed(doctype)

    def comment(self, content):
        "Handle comments as Comment objects."
        self.soup.endData()
        self.soup.handle_data(content)
        self.soup.endData(Comment)

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return '<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment


class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """HTML variant: same event plumbing, but backed by lxml's HTMLParser."""

    features = [LXML, HTML, FAST, PERMISSIVE]
    is_xml = False

    @property
    def default_parser(self):
        # Returned as a class; __init__ instantiates it with target=self.
        return etree.HTMLParser

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return '<html><body>%s</body></html>' % fragment
apache-2.0
spartonia/django-oscar
src/oscar/management/commands/oscar_cleanup_alerts.py
19
1703
import logging
from datetime import timedelta
from optparse import make_option

from django.core.management.base import BaseCommand
from django.utils.timezone import now

from oscar.core.loading import get_model

ProductAlert = get_model('customer', 'ProductAlert')

logger = logging.getLogger(__name__)


class Command(BaseCommand):
    """
    Command to remove all stale unconfirmed alerts.

    Alerts still in the ``UNCONFIRMED`` state after the given age (``--days``
    plus ``--hours``, defaulting to 24 hours) are assumed to belong to wrong
    email addresses or customers who changed their mind, and are deleted.
    """
    help = "Check unconfirmed alerts and clean them up"
    # Fixed typo in both help strings: "older then" -> "older than".
    option_list = BaseCommand.option_list + (
        make_option('--days', dest='days', default=0,
                    help='cleanup alerts older than DAYS from now.'),
        make_option('--hours', dest='hours', default=0,
                    help='cleanup alerts older than HOURS from now.'),
    )

    def handle(self, *args, **options):
        """
        Generate a threshold date from the input options or 24 hours if
        no options specified. All alerts that have the status ``UNCONFIRMED``
        and have been created before the threshold date will be removed
        assuming that the emails are wrong or the customer changed their
        mind.
        """
        # Option values arrive as strings from the CLI, hence the int() casts.
        delta = timedelta(days=int(options['days']),
                          hours=int(options['hours']))

        # A zero-length delta means no option was given: default to 24 hours.
        if not delta:
            delta = timedelta(hours=24)

        threshold_date = now() - delta
        logger.info('Deleting unconfirmed alerts older than %s',
                    threshold_date.strftime("%Y-%m-%d %H:%M"))
        qs = ProductAlert.objects.filter(
            status=ProductAlert.UNCONFIRMED,
            date_created__lt=threshold_date
        )
        logger.info("Found %d stale alerts to delete", qs.count())
        qs.delete()
bsd-3-clause
saurabh6790/frappe
frappe/integrations/doctype/paypal_settings/paypal_settings.py
2
14226
# -*- coding: utf-8 -*- # Copyright (c) 2015, Frappe Technologies and contributors # For license information, please see license.txt """ # Integrating PayPal ### 1. Validate Currency Support Example: from frappe.integrations.utils import get_payment_gateway_controller controller = get_payment_gateway_controller("PayPal") controller().validate_transaction_currency(currency) ### 2. Redirect for payment Example: payment_details = { "amount": 600, "title": "Payment for bill : 111", "description": "payment via cart", "reference_doctype": "Payment Request", "reference_docname": "PR0001", "payer_email": "NuranVerkleij@example.com", "payer_name": "Nuran Verkleij", "order_id": "111", "currency": "USD", "payment_gateway": "Razorpay", "subscription_details": { "plan_id": "plan_12313", # if Required "start_date": "2018-08-30", "billing_period": "Month" #(Day, Week, SemiMonth, Month, Year), "billing_frequency": 1, "customer_notify": 1, "upfront_amount": 1000 } } # redirect the user to this url url = controller().get_payment_url(**payment_details) ### 3. On Completion of Payment Write a method for `on_payment_authorized` in the reference doctype Example: def on_payment_authorized(payment_status): # your code to handle callback ##### Note: payment_status - payment gateway will put payment status on callback. 
For paypal payment status parameter is one from: [Completed, Cancelled, Failed] More Details: <div class="small">For details on how to get your API credentials, follow this link: <a href="https://developer.paypal.com/docs/classic/api/apiCredentials/" target="_blank">https://developer.paypal.com/docs/classic/api/apiCredentials/</a></div> """ from __future__ import unicode_literals import frappe import json import pytz from frappe import _ from six.moves.urllib.parse import urlencode from frappe.model.document import Document from frappe.integrations.utils import create_request_log, make_post_request, create_payment_gateway from frappe.utils import get_url, call_hook_method, cint, get_datetime api_path = '/api/method/frappe.integrations.doctype.paypal_settings.paypal_settings' class PayPalSettings(Document): supported_currencies = ["AUD", "BRL", "CAD", "CZK", "DKK", "EUR", "HKD", "HUF", "ILS", "JPY", "MYR", "MXN", "TWD", "NZD", "NOK", "PHP", "PLN", "GBP", "RUB", "SGD", "SEK", "CHF", "THB", "TRY", "USD"] def __setup__(self): setattr(self, "use_sandbox", 0) def setup_sandbox_env(self, token): data = json.loads(frappe.db.get_value("Integration Request", token, "data")) setattr(self, "use_sandbox", cint(frappe._dict(data).use_sandbox) or 0) def validate(self): create_payment_gateway("PayPal") call_hook_method('payment_gateway_enabled', gateway="PayPal") if not self.flags.ignore_mandatory: self.validate_paypal_credentails() def on_update(self): pass def validate_transaction_currency(self, currency): if currency not in self.supported_currencies: frappe.throw(_("Please select another payment method. 
PayPal does not support transactions in currency '{0}'").format(currency)) def get_paypal_params_and_url(self): params = { "USER": self.api_username, "PWD": self.get_password(fieldname="api_password", raise_exception=False), "SIGNATURE": self.signature, "VERSION": "98", "METHOD": "GetPalDetails" } if hasattr(self, "use_sandbox") and self.use_sandbox: params.update({ "USER": frappe.conf.sandbox_api_username, "PWD": frappe.conf.sandbox_api_password, "SIGNATURE": frappe.conf.sandbox_signature }) api_url = "https://api-3t.sandbox.paypal.com/nvp" if (self.paypal_sandbox or self.use_sandbox) else "https://api-3t.paypal.com/nvp" return params, api_url def validate_paypal_credentails(self): params, url = self.get_paypal_params_and_url() params = urlencode(params) try: res = make_post_request(url=url, data=params.encode("utf-8")) if res["ACK"][0] == "Failure": raise Exception except Exception: frappe.throw(_("Invalid payment gateway credentials")) def get_payment_url(self, **kwargs): setattr(self, "use_sandbox", cint(kwargs.get("use_sandbox", 0))) response = self.execute_set_express_checkout(**kwargs) if self.paypal_sandbox or self.use_sandbox: return_url = "https://www.sandbox.paypal.com/cgi-bin/webscr?cmd=_express-checkout&token={0}" else: return_url = "https://www.paypal.com/cgi-bin/webscr?cmd=_express-checkout&token={0}" kwargs.update({ "token": response.get("TOKEN")[0], "correlation_id": response.get("CORRELATIONID")[0] }) self.integration_request = create_request_log(kwargs, "Remote", "PayPal", response.get("TOKEN")[0]) return return_url.format(kwargs["token"]) def execute_set_express_checkout(self, **kwargs): params, url = self.get_paypal_params_and_url() params.update({ "METHOD": "SetExpressCheckout", "returnUrl": get_url("{0}.get_express_checkout_details".format(api_path)), "cancelUrl": get_url("/payment-cancel"), "PAYMENTREQUEST_0_PAYMENTACTION": "SALE", "PAYMENTREQUEST_0_AMT": kwargs['amount'], "PAYMENTREQUEST_0_CURRENCYCODE": kwargs['currency'].upper() }) if 
kwargs.get('subscription_details'): self.configure_recurring_payments(params, kwargs) params = urlencode(params) response = make_post_request(url, data=params.encode("utf-8")) if response.get("ACK")[0] != "Success": frappe.throw(_("Looks like something is wrong with this site's Paypal configuration.")) return response def configure_recurring_payments(self, params, kwargs): # removing the params as we have to setup rucurring payments for param in ('PAYMENTREQUEST_0_PAYMENTACTION', 'PAYMENTREQUEST_0_AMT', 'PAYMENTREQUEST_0_CURRENCYCODE'): del params[param] params.update({ "L_BILLINGTYPE0": "RecurringPayments", #The type of billing agreement "L_BILLINGAGREEMENTDESCRIPTION0": kwargs['description'] }) def get_paypal_and_transaction_details(token): doc = frappe.get_doc("PayPal Settings") doc.setup_sandbox_env(token) params, url = doc.get_paypal_params_and_url() integration_request = frappe.get_doc("Integration Request", token) data = json.loads(integration_request.data) return data, params, url def setup_redirect(data, redirect_url, custom_redirect_to=None, redirect=True): redirect_to = data.get('redirect_to') or None redirect_message = data.get('redirect_message') or None if custom_redirect_to: redirect_to = custom_redirect_to if redirect_to: redirect_url += '&' + urlencode({'redirect_to': redirect_to}) if redirect_message: redirect_url += '&' + urlencode({'redirect_message': redirect_message}) # this is done so that functions called via hooks can update flags.redirect_to if redirect: frappe.local.response["type"] = "redirect" frappe.local.response["location"] = get_url(redirect_url) @frappe.whitelist(allow_guest=True, xss_safe=True) def get_express_checkout_details(token): try: doc = frappe.get_doc("PayPal Settings") doc.setup_sandbox_env(token) params, url = doc.get_paypal_params_and_url() params.update({ "METHOD": "GetExpressCheckoutDetails", "TOKEN": token }) response = make_post_request(url, data=params) if response.get("ACK")[0] != "Success": 
frappe.respond_as_web_page(_("Something went wrong"), _("Looks like something went wrong during the transaction. Since we haven't confirmed the payment, Paypal will automatically refund you this amount. If it doesn't, please send us an email and mention the Correlation ID: {0}.").format(response.get("CORRELATIONID", [None])[0]), indicator_color='red', http_status_code=frappe.ValidationError.http_status_code) return doc = frappe.get_doc("Integration Request", token) update_integration_request_status(token, { "payerid": response.get("PAYERID")[0], "payer_email": response.get("EMAIL")[0] }, "Authorized", doc=doc) frappe.local.response["type"] = "redirect" frappe.local.response["location"] = get_redirect_uri(doc, token, response.get("PAYERID")[0]) except Exception: frappe.log_error(frappe.get_traceback()) @frappe.whitelist(allow_guest=True, xss_safe=True) def confirm_payment(token): try: custom_redirect_to = None data, params, url = get_paypal_and_transaction_details(token) params.update({ "METHOD": "DoExpressCheckoutPayment", "PAYERID": data.get("payerid"), "TOKEN": token, "PAYMENTREQUEST_0_PAYMENTACTION": "SALE", "PAYMENTREQUEST_0_AMT": data.get("amount"), "PAYMENTREQUEST_0_CURRENCYCODE": data.get("currency").upper() }) response = make_post_request(url, data=params) if response.get("ACK")[0] == "Success": update_integration_request_status(token, { "transaction_id": response.get("PAYMENTINFO_0_TRANSACTIONID")[0], "correlation_id": response.get("CORRELATIONID")[0] }, "Completed") if data.get("reference_doctype") and data.get("reference_docname"): custom_redirect_to = frappe.get_doc(data.get("reference_doctype"), data.get("reference_docname")).run_method("on_payment_authorized", "Completed") frappe.db.commit() redirect_url = '/integrations/payment-success?doctype={0}&docname={1}'.format(data.get("reference_doctype"), data.get("reference_docname")) else: redirect_url = "/integrations/payment-failed" setup_redirect(data, redirect_url, custom_redirect_to) except Exception: 
frappe.log_error(frappe.get_traceback()) @frappe.whitelist(allow_guest=True, xss_safe=True) def create_recurring_profile(token, payerid): try: custom_redirect_to = None updating = False data, params, url = get_paypal_and_transaction_details(token) addons = data.get("addons") subscription_details = data.get("subscription_details") if data.get('subscription_id'): if addons: updating = True manage_recurring_payment_profile_status(data['subscription_id'], 'Cancel', params, url) params.update({ "METHOD": "CreateRecurringPaymentsProfile", "PAYERID": payerid, "TOKEN": token, "DESC": data.get("description"), "BILLINGPERIOD": subscription_details.get("billing_period"), "BILLINGFREQUENCY": subscription_details.get("billing_frequency"), "AMT": data.get("amount") if data.get("subscription_amount") == data.get("amount") else data.get("subscription_amount"), "CURRENCYCODE": data.get("currency").upper(), "INITAMT": data.get("upfront_amount") }) status_changed_to = 'Completed' if data.get("starting_immediately") or updating else 'Verified' starts_at = get_datetime(subscription_details.get("start_date")) or frappe.utils.now_datetime() starts_at = starts_at.replace(tzinfo=pytz.timezone(frappe.utils.get_time_zone())).astimezone(pytz.utc) #"PROFILESTARTDATE": datetime.utcfromtimestamp(get_timestamp(starts_at)).isoformat() params.update({ "PROFILESTARTDATE": starts_at.isoformat() }) response = make_post_request(url, data=params) if response.get("ACK")[0] == "Success": update_integration_request_status(token, { "profile_id": response.get("PROFILEID")[0], }, "Completed") if data.get("reference_doctype") and data.get("reference_docname"): data['subscription_id'] = response.get("PROFILEID")[0] frappe.flags.data = data custom_redirect_to = frappe.get_doc(data.get("reference_doctype"), data.get("reference_docname")).run_method("on_payment_authorized", status_changed_to) frappe.db.commit() redirect_url = 
'/integrations/payment-success?doctype={0}&docname={1}'.format(data.get("reference_doctype"), data.get("reference_docname")) else: redirect_url = "/integrations/payment-failed" setup_redirect(data, redirect_url, custom_redirect_to) except Exception: frappe.log_error(frappe.get_traceback()) def update_integration_request_status(token, data, status, error=False, doc=None): if not doc: doc = frappe.get_doc("Integration Request", token) doc.update_status(data, status) def get_redirect_uri(doc, token, payerid): data = json.loads(doc.data) if data.get("subscription_details") or data.get("subscription_id"): return get_url("{0}.create_recurring_profile?token={1}&payerid={2}".format(api_path, token, payerid)) else: return get_url("{0}.confirm_payment?token={1}".format(api_path, token)) def manage_recurring_payment_profile_status(profile_id, action, args, url): args.update({ "METHOD": "ManageRecurringPaymentsProfileStatus", "PROFILEID": profile_id, "ACTION": action }) response = make_post_request(url, data=args) # error code 11556 indicates profile is not in active state(or already cancelled) # thus could not cancel the subscription. 
# thus raise an exception only if the error code is not equal to 11556 if response.get("ACK")[0] != "Success" and response.get("L_ERRORCODE0", [])[0] != '11556': frappe.throw(_("Failed while amending subscription")) @frappe.whitelist(allow_guest=True) def ipn_handler(): try: data = frappe.local.form_dict validate_ipn_request(data) data.update({ "payment_gateway": "PayPal" }) doc = frappe.get_doc({ "data": json.dumps(frappe.local.form_dict), "doctype": "Integration Request", "integration_type": "Subscription Notification", "status": "Queued" }).insert(ignore_permissions=True) frappe.db.commit() frappe.enqueue(method='frappe.integrations.doctype.paypal_settings.paypal_settings.handle_subscription_notification', queue='long', timeout=600, is_async=True, **{"doctype": "Integration Request", "docname": doc.name}) except frappe.InvalidStatusError: pass except Exception as e: frappe.log(frappe.log_error(title=e)) def validate_ipn_request(data): def _throw(): frappe.throw(_("In Valid Request"), exc=frappe.InvalidStatusError) if not data.get("recurring_payment_id"): _throw() doc = frappe.get_doc("PayPal Settings") params, url = doc.get_paypal_params_and_url() params.update({ "METHOD": "GetRecurringPaymentsProfileDetails", "PROFILEID": data.get("recurring_payment_id") }) params = urlencode(params) res = make_post_request(url=url, data=params.encode("utf-8")) if res['ACK'][0] != 'Success': _throw() def handle_subscription_notification(doctype, docname): call_hook_method("handle_subscription_notification", doctype=doctype, docname=docname)
mit
nagyistoce/odoo-dev-odoo
addons/payment_ogone/tests/test_ogone.py
430
9309
# -*- coding: utf-8 -*- from lxml import objectify import time import urlparse from openerp.addons.payment.models.payment_acquirer import ValidationError from openerp.addons.payment.tests.common import PaymentAcquirerCommon from openerp.addons.payment_ogone.controllers.main import OgoneController from openerp.tools import mute_logger class OgonePayment(PaymentAcquirerCommon): def setUp(self): super(OgonePayment, self).setUp() cr, uid = self.cr, self.uid self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url') # get the adyen account model, self.ogone_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_ogone', 'payment_acquirer_ogone') def test_10_ogone_form_render(self): cr, uid, context = self.cr, self.uid, {} # be sure not to do stupid thing ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None) self.assertEqual(ogone.environment, 'test', 'test without test environment') # ---------------------------------------- # Test: button direct rendering + shasign # ---------------------------------------- form_values = { 'PSPID': 'dummy', 'ORDERID': 'test_ref0', 'AMOUNT': '1', 'CURRENCY': 'EUR', 'LANGUAGE': 'en_US', 'CN': 'Norbert Buyer', 'EMAIL': 'norbert.buyer@example.com', 'OWNERZIP': '1000', 'OWNERADDRESS': 'Huge Street 2/543', 'OWNERCTY': 'Belgium', 'OWNERTOWN': 'Sin City', 'OWNERTELNO': '0032 12 34 56 78', 'SHASIGN': '815f67b8ff70d234ffcf437c13a9fa7f807044cc', 'ACCEPTURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._accept_url), 'DECLINEURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._decline_url), 'EXCEPTIONURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._exception_url), 'CANCELURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._cancel_url), } # render the button res = self.payment_acquirer.render( cr, uid, self.ogone_id, 'test_ref0', 0.01, self.currency_euro_id, partner_id=None, partner_values=self.buyer_values, context=context) # check form 
result tree = objectify.fromstring(res) self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url') for form_input in tree.input: if form_input.get('name') in ['submit']: continue self.assertEqual( form_input.get('value'), form_values[form_input.get('name')], 'ogone: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')]) ) # ---------------------------------------- # Test2: button using tx + validation # ---------------------------------------- # create a new draft tx tx_id = self.payment_transaction.create( cr, uid, { 'amount': 0.01, 'acquirer_id': self.ogone_id, 'currency_id': self.currency_euro_id, 'reference': 'test_ref0', 'partner_id': self.buyer_id, }, context=context ) # render the button res = self.payment_acquirer.render( cr, uid, self.ogone_id, 'should_be_erased', 0.01, self.currency_euro, tx_id=tx_id, partner_id=None, partner_values=self.buyer_values, context=context) # check form result tree = objectify.fromstring(res) self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url') for form_input in tree.input: if form_input.get('name') in ['submit']: continue self.assertEqual( form_input.get('value'), form_values[form_input.get('name')], 'ogone: wrong value for form input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')]) ) @mute_logger('openerp.addons.payment_ogone.models.ogone', 'ValidationError') def test_20_ogone_form_management(self): cr, uid, context = self.cr, self.uid, {} # be sure not to do stupid thing ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None) self.assertEqual(ogone.environment, 'test', 'test without test environment') # typical data posted by ogone after client has successfully paid ogone_post_data = { 'orderID': u'test_ref_2', 
'STATUS': u'9', 'CARDNO': u'XXXXXXXXXXXX0002', 'PAYID': u'25381582', 'CN': u'Norbert Buyer', 'NCERROR': u'0', 'TRXDATE': u'11/15/13', 'IP': u'85.201.233.72', 'BRAND': u'VISA', 'ACCEPTANCE': u'test123', 'currency': u'EUR', 'amount': u'1.95', 'SHASIGN': u'7B7B0ED9CBC4A85543A9073374589033A62A05A5', 'ED': u'0315', 'PM': u'CreditCard' } # should raise error about unknown tx with self.assertRaises(ValidationError): self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context) # create tx tx_id = self.payment_transaction.create( cr, uid, { 'amount': 1.95, 'acquirer_id': self.ogone_id, 'currency_id': self.currency_euro_id, 'reference': 'test_ref_2', 'partner_name': 'Norbert Buyer', 'partner_country_id': self.country_france_id, }, context=context ) # validate it self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context) # check state tx = self.payment_transaction.browse(cr, uid, tx_id, context=context) self.assertEqual(tx.state, 'done', 'ogone: validation did not put tx into done state') self.assertEqual(tx.ogone_payid, ogone_post_data.get('PAYID'), 'ogone: validation did not update tx payid') # reset tx tx.write({'state': 'draft', 'date_validate': False, 'ogone_payid': False}) # now ogone post is ok: try to modify the SHASIGN ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691' with self.assertRaises(ValidationError): self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context) # simulate an error ogone_post_data['STATUS'] = 2 ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691' self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context) # check state tx = self.payment_transaction.browse(cr, uid, tx_id, context=context) self.assertEqual(tx.state, 'error', 'ogone: erroneous validation did not put tx into error state') def test_30_ogone_s2s(self): test_ref = 'test_ref_%.15f' % time.time() cr, uid, context = self.cr, self.uid, {} 
# be sure not to do stupid thing ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None) self.assertEqual(ogone.environment, 'test', 'test without test environment') # create a new draft tx tx_id = self.payment_transaction.create( cr, uid, { 'amount': 0.01, 'acquirer_id': self.ogone_id, 'currency_id': self.currency_euro_id, 'reference': test_ref, 'partner_id': self.buyer_id, 'type': 'server2server', }, context=context ) # create an alias res = self.payment_transaction.ogone_s2s_create_alias( cr, uid, tx_id, { 'expiry_date_mm': '01', 'expiry_date_yy': '2015', 'holder_name': 'Norbert Poilu', 'number': '4000000000000002', 'brand': 'VISA', }, context=context) # check an alias is set, containing at least OPENERP tx = self.payment_transaction.browse(cr, uid, tx_id, context=context) self.assertIn('OPENERP', tx.partner_reference, 'ogone: wrong partner reference after creating an alias') res = self.payment_transaction.ogone_s2s_execute(cr, uid, tx_id, {}, context=context) # print res # { # 'orderID': u'reference', # 'STATUS': u'9', # 'CARDNO': u'XXXXXXXXXXXX0002', # 'PAYID': u'24998692', # 'CN': u'Norbert Poilu', # 'NCERROR': u'0', # 'TRXDATE': u'11/05/13', # 'IP': u'85.201.233.72', # 'BRAND': u'VISA', # 'ACCEPTANCE': u'test123', # 'currency': u'EUR', # 'amount': u'1.95', # 'SHASIGN': u'EFDC56879EF7DE72CCF4B397076B5C9A844CB0FA', # 'ED': u'0314', # 'PM': u'CreditCard' # }
agpl-3.0
archf/ansible
lib/ansible/modules/utilities/logic/fail.py
113
1256
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2012 Dag Wieers <dag@wieers.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = ''' --- module: fail short_description: Fail with custom message description: - This module fails the progress with a custom message. It can be useful for bailing out when a certain condition is met using C(when). - This module is also supported for Windows targets. version_added: "0.8" options: msg: description: - The customized message used for failing execution. If omitted, fail will simply bail out with a generic message. required: false default: "'Failed as requested from task'" notes: - This module is also supported for Windows targets. author: "Dag Wieers (@dagwieers)" ''' EXAMPLES = ''' # Example playbook using fail and when together - fail: msg: "The system may not be provisioned according to the CMDB status." when: cmdb_status != "to-be-staged" '''
gpl-3.0
udacity/ggplot
ggplot/geoms/geom_tile.py
12
3695
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
import pandas as pd
import numpy as np

from .geom import geom
from matplotlib.patches import Rectangle
import matplotlib.colors as colors
import matplotlib.colorbar as colorbar


class geom_tile(geom):
    # Tile/heatmap geom: draws one filled rectangle per (x, y) cell.
    DEFAULT_AES = {}
    REQUIRED_AES = {'x', 'y', 'fill'}
    DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity'}

    _aes_renames = {}
    _units = set()

    def _plot_unit(self, pinfo, ax):
        """Render one layer of tiles onto matplotlib axes *ax*.

        *pinfo* must carry 'x', 'y' and 'fill' sequences of equal length
        (they are popped, i.e. consumed).  When 'fill' was given in this
        geom's own aes (not the ggplot aes), numeric fill values are mapped
        through ``self.gg.colormap`` to hex colors and a colorbar is drawn;
        otherwise the fill values are used as colors directly.
        """
        x = pinfo.pop('x')
        y = pinfo.pop('y')
        fill = pinfo.pop('fill')

        # TODO: Fix this hack!
        # Currently, if the fill is specified in the ggplot aes wrapper, ggplot
        # will assign colors without regard to the fill values. This is okay for
        # categorical maps but not heatmaps. At this stage in the pipeline the
        # geom can't recover the original values.
        #
        # However, if the fill is specified in the geom_tile aes wrapper, the
        # original fill values are sent unaltered, so we can make a heat map
        # with the values.

        # Was the fill specified in geom wrapper only? (i.e. not in ggplot)
        if 'fill' in self.aes_unique_to_geom:
            # Determine if there are non-numeric values.
            # (Py2 module: `long` is intentional.)
            if False in [isinstance(v, (int, long, float, complex)) for v in set(fill)]:
                # No need to handle this case. Instruct the user to put categorical
                # values in the ggplot wrapper.
                raise Exception('For categorical fill values specify fill in the ggplot aes instead of the geom_tile aes.')
            # All values are numeric so determine fill using colormap.
            else:
                fill_min = np.min(fill)
                fill_max = np.max(fill)

                if np.isnan(fill_min):
                    raise Exception('Fill values cannot contain NaN values.')

                # Normalize fill into [0, 1] for the colormap.
                # NOTE(review): fill_rng is 0 when all fill values are equal,
                # which makes the next division blow up -- confirm whether a
                # caller guards against constant fills.
                # NOTE(review): `fill - fill_min` assumes *fill* supports
                # vectorized arithmetic (numpy-like) -- verify against callers.
                fill_rng = float(fill_max - fill_min)
                fill_vals = (fill - fill_min) / fill_rng

                cmap = self.gg.colormap(fill_vals.tolist())
                # Keep RGB only (drop alpha) and convert to hex strings.
                fill = [colors.rgb2hex(c) for c in cmap[::, :3]]

        # Pivot into a (y x x) grid of fill colors; missing cells become NaN.
        df = pd.DataFrame(
            {'x': x, 'y': y, 'fill': fill}).set_index(['x', 'y']).unstack(0)

        # Setup axes.
        # Each cell is 2 units wide/tall so tick labels can sit at cell
        # centers (odd positions) with blanks at the edges (even positions).
        x_ticks = range(2*len(set(x)) + 1)
        y_ticks = range(2*len(set(y)) + 1)

        x_indices = sorted(set(x))
        y_indices = sorted(set(y))

        # Setup box plotting parameters.
        x_start = 0
        y_start = 0
        x_step = 2
        y_step = 2

        # Plot grid.
        on_y = y_start
        for yi in xrange(len(y_indices)):
            on_x = x_start
            for xi in xrange(len(x_indices)):
                color = df.iloc[yi,xi]
                # A float here is the NaN produced by unstack() for a missing
                # cell -- skip it; real colors are hex strings.
                if not isinstance(color, float):
                    ax.add_patch(Rectangle((on_x, on_y), x_step, y_step, facecolor=color))
                on_x += x_step
            on_y += y_step

        # Draw the colorbar scale if drawing a heat map.
        # ('cmap' only exists in locals() when the numeric-fill branch ran.)
        if 'cmap' in locals():
            norm = colors.Normalize(vmin = fill_min, vmax = fill_max)
            cax, kw = colorbar.make_axes(ax)
            cax.hold(True)
            colorbar.ColorbarBase(cax, cmap = self.gg.colormap, norm = norm)

        # Set axis labels and ticks.
        # Interleave each label between empty strings so it lands on the
        # center tick of its doubled-width cell.
        x_labels = ['']*(len(x_indices)+1)
        for i,v in enumerate(x_indices):
            x_labels.insert(2*i+1, v)
        y_labels = ['']*(len(y_indices)+1)
        for i,v in enumerate(y_indices):
            y_labels.insert(2*i+1, v)

        ax.set_xticklabels(x_labels)
        ax.set_xticks(x_ticks)
        ax.set_yticklabels(y_labels)
        ax.set_yticks(y_ticks)
bsd-2-clause
GNOME/hamster-applet
src/hamster/widgets/dayline.py
3
13606
# -*- coding: utf-8 -*- # Copyright (C) 2007-2010 Toms Bauģis <toms.baugis at gmail.com> # This file is part of Project Hamster. # Project Hamster is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # Project Hamster is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with Project Hamster. If not, see <http://www.gnu.org/licenses/>. import gtk import gobject import time import datetime as dt from ..lib import stuff, graphics, pytweener from ..configuration import conf class Selection(graphics.Sprite): def __init__(self, start_time = None, end_time = None): graphics.Sprite.__init__(self, z_order = 100) self.start_time, self.end_time = None, None self.width, self.height = None, None self.fill = None # will be set to proper theme color on render self.fixed = False self.start_label = graphics.Label("", 8, "#333", visible = False) self.end_label = graphics.Label("", 8, "#333", visible = False) self.duration_label = graphics.Label("", 8, "#FFF", visible = False) self.add_child(self.start_label, self.end_label, self.duration_label) self.connect("on-render", self.on_render) def on_render(self, sprite): if not self.fill: # not ready yet return self.graphics.rectangle(0, 0, self.width, self.height) self.graphics.fill(self.fill, 0.3) self.graphics.rectangle(0.5, 0.5, self.width, self.height) self.graphics.stroke(self.fill) # adjust labels self.start_label.visible = self.fixed == False and self.start_time is not None if self.start_label.visible: self.start_label.text = self.start_time.strftime("%H:%M") if self.x - self.start_label.width - 5 > 0: 
self.start_label.x = -self.start_label.width - 5 else: self.start_label.x = 5 self.start_label.y = self.height self.end_label.visible = self.fixed == False and self.end_time is not None if self.end_label.visible: self.end_label.text = self.end_time.strftime("%H:%M") self.end_label.x = self.width + 5 self.end_label.y = self.height duration = self.end_time - self.start_time duration = int(duration.seconds / 60) self.duration_label.text = "%02d:%02d" % (duration / 60, duration % 60) self.duration_label.visible = self.duration_label.width < self.width if self.duration_label.visible: self.duration_label.y = (self.height - self.duration_label.height) / 2 self.duration_label.x = (self.width - self.duration_label.width) / 2 else: self.duration_label.visible = False class DayLine(graphics.Scene): __gsignals__ = { "on-time-chosen": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)), } def __init__(self, start_time = None): graphics.Scene.__init__(self) day_start = conf.get("day_start_minutes") self.day_start = dt.time(day_start / 60, day_start % 60) self.view_time = start_time or dt.datetime.combine(dt.date.today(), self.day_start) self.scope_hours = 24 self.fact_bars = [] self.categories = [] self.connect("on-enter-frame", self.on_enter_frame) self.connect("on-mouse-move", self.on_mouse_move) self.connect("on-mouse-down", self.on_mouse_down) self.connect("on-mouse-up", self.on_mouse_up) self.connect("on-click", self.on_click) self.plot_area = graphics.Sprite() self.selection = Selection() self.chosen_selection = Selection() self.plot_area.add_child(self.selection, self.chosen_selection) self.drag_start = None self.current_x = None self.snap_points = [] self.add_child(self.plot_area) def plot(self, date, facts, select_start, select_end = None): for bar in self.fact_bars: self.plot_area.sprites.remove(bar) self.fact_bars = [] for fact in facts: fact_bar = graphics.Rectangle(0, 0, fill = "#aaa", stroke="#aaa") # dimensions will depend 
on screen situation fact_bar.fact = fact if fact.category in self.categories: fact_bar.category = self.categories.index(fact.category) else: fact_bar.category = len(self.categories) self.categories.append(fact.category) self.plot_area.add_child(fact_bar) self.fact_bars.append(fact_bar) self.view_time = dt.datetime.combine((select_start - dt.timedelta(hours=self.day_start.hour, minutes=self.day_start.minute)).date(), self.day_start) if select_start and select_start > dt.datetime.now(): select_start = dt.datetime.now() self.chosen_selection.start_time = select_start if select_end and select_end > dt.datetime.now(): select_end = dt.datetime.now() self.chosen_selection.end_time = select_end self.chosen_selection.width = None self.chosen_selection.fixed = True self.chosen_selection.visible = True self.redraw() def on_mouse_down(self, scene, event): self.drag_start = self.current_x self.chosen_selection.visible = False def on_mouse_up(self, scene, event): if self.drag_start: self.drag_start = None start_time = self.selection.start_time if start_time > dt.datetime.now(): start_time = dt.datetime.now() end_time = self.selection.end_time self.new_selection() self.emit("on-time-chosen", start_time, end_time) def on_click(self, scene, event, target): self.drag_start = None start_time = self.selection.start_time if start_time > dt.datetime.now(): start_time = dt.datetime.now() end_time = None if self.fact_bars: times = [bar.fact.start_time for bar in self.fact_bars if bar.fact.start_time - start_time > dt.timedelta(minutes=5)] times.extend([bar.fact.start_time + bar.fact.delta for bar in self.fact_bars if bar.fact.start_time + bar.fact.delta - start_time > dt.timedelta(minutes=5)]) if times: end_time = min(times) self.new_selection() self.emit("on-time-chosen", start_time, end_time) def new_selection(self): self.plot_area.sprites.remove(self.selection) self.selection = Selection() self.plot_area.add_child(self.selection) self.redraw() def on_mouse_move(self, scene, event): if 
self.current_x: active_bar = None # find if we are maybe on a bar for bar in self.fact_bars: if bar.x < self.current_x < bar.x + bar.width: active_bar = bar break if active_bar: self.set_tooltip_text("%s - %s" % (active_bar.fact.activity, active_bar.fact.category)) else: self.set_tooltip_text("") self.redraw() def on_enter_frame(self, scene, context): g = graphics.Graphics(context) self.plot_area.y = 15.5 self.plot_area.height = self.height - 30 vertical = min(self.plot_area.height / 5 , 7) minute_pixel = (self.scope_hours * 60.0 - 15) / self.width snap_points = [] g.set_line_style(width=1) bottom = self.plot_area.y + self.plot_area.height for bar in self.fact_bars: bar.y = vertical * bar.category + 5 bar.height = vertical bar_start_time = bar.fact.start_time - self.view_time minutes = bar_start_time.seconds / 60 + bar_start_time.days * self.scope_hours * 60 bar.x = round(minutes / minute_pixel) + 0.5 bar.width = round((bar.fact.delta).seconds / 60 / minute_pixel) if not snap_points or bar.x - snap_points[-1][0] > 1: snap_points.append((bar.x, bar.fact.start_time)) if not snap_points or bar.x + bar.width - snap_points[-1][0] > 1: snap_points.append((bar.x + bar.width, bar.fact.start_time + bar.fact.delta)) self.snap_points = snap_points if self.chosen_selection.start_time and self.chosen_selection.width is None: # we have time but no pixels minutes = round((self.chosen_selection.start_time - self.view_time).seconds / 60 / minute_pixel) + 0.5 self.chosen_selection.x = minutes if self.chosen_selection.end_time: self.chosen_selection.width = round((self.chosen_selection.end_time - self.chosen_selection.start_time).seconds / 60 / minute_pixel) else: self.chosen_selection.width = 0 self.chosen_selection.height = self.chosen_selection.parent.height # use the oportunity to set proper colors too self.chosen_selection.fill = self.get_style().bg[gtk.STATE_SELECTED].to_string() self.chosen_selection.duration_label.color = self.get_style().fg[gtk.STATE_SELECTED].to_string() 
self.selection.visible = self._mouse_in # TODO - think harder about the mouse_out event self.selection.width = 0 self.selection.height = self.selection.parent.height if self.mouse_x: start_x = max(min(self.mouse_x, self.width-1), 0) #mouse, but within screen regions # check for snap points start_x = start_x + 0.5 minutes = int(round(start_x * minute_pixel / 15)) * 15 start_time = self.view_time + dt.timedelta(hours = minutes / 60, minutes = minutes % 60) if snap_points: delta, closest_snap, time = min((abs(start_x - i), i, time) for i, time in snap_points) if abs(closest_snap - start_x) < 5 and (not self.drag_start or self.drag_start != closest_snap): start_x = closest_snap minutes = (time.hour - self.day_start.hour) * 60 + time.minute - self.day_start.minute start_time = time self.current_x = minutes / minute_pixel end_time, end_x = None, None if self.drag_start: minutes = int(self.drag_start * minute_pixel) end_time = self.view_time + dt.timedelta(hours = minutes / 60, minutes = minutes % 60) end_x = round(self.drag_start) + 0.5 if end_time and end_time < start_time: start_time, end_time = end_time, start_time start_x, end_x = end_x, start_x self.selection.start_time = start_time self.selection.end_time = end_time self.selection.x = start_x if end_time: self.selection.width = end_x - start_x self.selection.y = 0 self.selection.fill = self.get_style().bg[gtk.STATE_SELECTED].to_string() self.selection.duration_label.color = self.get_style().fg[gtk.STATE_SELECTED].to_string() #time scale g.set_color("#000") background = self.get_style().bg[gtk.STATE_NORMAL].to_string() text = self.get_style().text[gtk.STATE_NORMAL].to_string() tick_color = g.colors.contrast(background, 80) layout = g.create_layout(size = 8) for i in range(self.scope_hours * 60): label_time = (self.view_time + dt.timedelta(minutes=i)) g.set_color(tick_color) if label_time.minute == 0: g.move_to(round(i / minute_pixel) + 0.5, bottom - 15) g.line_to(round(i / minute_pixel) + 0.5, bottom) g.stroke() 
elif label_time.minute % 15 == 0: g.move_to(round(i / minute_pixel) + 0.5, bottom - 5) g.line_to(round(i / minute_pixel) + 0.5, bottom) g.stroke() if label_time.minute == 0 and label_time.hour % 4 == 0: if label_time.hour == 0: g.move_to(round(i / minute_pixel) + 0.5, self.plot_area.y) g.line_to(round(i / minute_pixel) + 0.5, bottom) label_minutes = label_time.strftime("%b %d") else: label_minutes = label_time.strftime("%H<small><sup>%M</sup></small>") g.set_color(text) layout.set_markup(label_minutes) label_w, label_h = layout.get_pixel_size() g.move_to(round(i / minute_pixel) + 2, 0) context.show_layout(layout) #current time if self.view_time < dt.datetime.now() < self.view_time + dt.timedelta(hours = self.scope_hours): minutes = round((dt.datetime.now() - self.view_time).seconds / 60 / minute_pixel) + 0.5 g.move_to(minutes, self.plot_area.y) g.line_to(minutes, bottom) g.stroke("#f00", 0.4) snap_points.append(minutes - 0.5)
gpl-3.0
disconnect3d/pwndbg
pwndbg/argv.py
4
1102
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Caches the inferior's initial stack layout -- argc, argv, envp, envc --
in module-level globals, refreshed at process start.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import gdb

import pwndbg.abi
import pwndbg.arch
import pwndbg.events
import pwndbg.memory
import pwndbg.regs

#: Total number of arguments
argc = None

#: Pointer to argv on the stack
argv = None

#: Pointer to envp on the stack
envp = None

#: Total number of environment variables
envc = None


@pwndbg.events.start
@pwndbg.abi.LinuxOnly()
def update():
    """Populate argc/argv/envp/envc by walking the stack at process start.

    On Linux the stack pointer at the entry point addresses::

        argc, argv[0..argc-1], NULL, envp[0..n-1], NULL

    Reads are best-effort: if the stack is not readable yet, the globals
    are left at their previous values and the function returns quietly.
    """
    global argc
    global argv
    global envp
    global envc

    pwndbg.arch.update()  # :-(

    sp = pwndbg.regs.sp
    ptrsize = pwndbg.arch.ptrsize
    ptrbits = 8 * ptrsize

    try:
        argc = pwndbg.memory.u(sp, ptrbits)
    except gdb.error:
        # Was a bare `except:`; narrowed to gdb errors (gdb.MemoryError is a
        # subclass) so KeyboardInterrupt and genuine programming errors are
        # no longer silently swallowed.
        return

    sp += ptrsize
    argv = sp

    # Skip over the argv array; it is terminated by a NULL pointer.
    while pwndbg.memory.u(sp, ptrbits):
        sp += ptrsize

    sp += ptrsize
    envp = sp

    # Count environment entries up to the terminating NULL pointer; stop
    # quietly if the walk runs off readable memory.
    envc = 0
    try:
        while pwndbg.memory.u(sp, ptrbits):
            sp += ptrsize
            envc += 1
    except gdb.MemoryError:
        pass
mit
havard024/prego
venv/lib/python2.7/site-packages/unidecode/x077.py
252
4675
data = ( 'Ming ', # 0x00 'Sheng ', # 0x01 'Shi ', # 0x02 'Yun ', # 0x03 'Mian ', # 0x04 'Pan ', # 0x05 'Fang ', # 0x06 'Miao ', # 0x07 'Dan ', # 0x08 'Mei ', # 0x09 'Mao ', # 0x0a 'Kan ', # 0x0b 'Xian ', # 0x0c 'Ou ', # 0x0d 'Shi ', # 0x0e 'Yang ', # 0x0f 'Zheng ', # 0x10 'Yao ', # 0x11 'Shen ', # 0x12 'Huo ', # 0x13 'Da ', # 0x14 'Zhen ', # 0x15 'Kuang ', # 0x16 'Ju ', # 0x17 'Shen ', # 0x18 'Chi ', # 0x19 'Sheng ', # 0x1a 'Mei ', # 0x1b 'Mo ', # 0x1c 'Zhu ', # 0x1d 'Zhen ', # 0x1e 'Zhen ', # 0x1f 'Mian ', # 0x20 'Di ', # 0x21 'Yuan ', # 0x22 'Die ', # 0x23 'Yi ', # 0x24 'Zi ', # 0x25 'Zi ', # 0x26 'Chao ', # 0x27 'Zha ', # 0x28 'Xuan ', # 0x29 'Bing ', # 0x2a 'Mi ', # 0x2b 'Long ', # 0x2c 'Sui ', # 0x2d 'Dong ', # 0x2e 'Mi ', # 0x2f 'Die ', # 0x30 'Yi ', # 0x31 'Er ', # 0x32 'Ming ', # 0x33 'Xuan ', # 0x34 'Chi ', # 0x35 'Kuang ', # 0x36 'Juan ', # 0x37 'Mou ', # 0x38 'Zhen ', # 0x39 'Tiao ', # 0x3a 'Yang ', # 0x3b 'Yan ', # 0x3c 'Mo ', # 0x3d 'Zhong ', # 0x3e 'Mai ', # 0x3f 'Zhao ', # 0x40 'Zheng ', # 0x41 'Mei ', # 0x42 'Jun ', # 0x43 'Shao ', # 0x44 'Han ', # 0x45 'Huan ', # 0x46 'Di ', # 0x47 'Cheng ', # 0x48 'Cuo ', # 0x49 'Juan ', # 0x4a 'E ', # 0x4b 'Wan ', # 0x4c 'Xian ', # 0x4d 'Xi ', # 0x4e 'Kun ', # 0x4f 'Lai ', # 0x50 'Jian ', # 0x51 'Shan ', # 0x52 'Tian ', # 0x53 'Hun ', # 0x54 'Wan ', # 0x55 'Ling ', # 0x56 'Shi ', # 0x57 'Qiong ', # 0x58 'Lie ', # 0x59 'Yai ', # 0x5a 'Jing ', # 0x5b 'Zheng ', # 0x5c 'Li ', # 0x5d 'Lai ', # 0x5e 'Sui ', # 0x5f 'Juan ', # 0x60 'Shui ', # 0x61 'Sui ', # 0x62 'Du ', # 0x63 'Bi ', # 0x64 'Bi ', # 0x65 'Mu ', # 0x66 'Hun ', # 0x67 'Ni ', # 0x68 'Lu ', # 0x69 'Yi ', # 0x6a 'Jie ', # 0x6b 'Cai ', # 0x6c 'Zhou ', # 0x6d 'Yu ', # 0x6e 'Hun ', # 0x6f 'Ma ', # 0x70 'Xia ', # 0x71 'Xing ', # 0x72 'Xi ', # 0x73 'Gun ', # 0x74 'Cai ', # 0x75 'Chun ', # 0x76 'Jian ', # 0x77 'Mei ', # 0x78 'Du ', # 0x79 'Hou ', # 0x7a 'Xuan ', # 0x7b 'Ti ', # 0x7c 'Kui ', # 0x7d 'Gao ', # 0x7e 'Rui ', # 0x7f 'Mou ', # 0x80 'Xu ', # 0x81 'Fa ', # 
0x82 'Wen ', # 0x83 'Miao ', # 0x84 'Chou ', # 0x85 'Kui ', # 0x86 'Mi ', # 0x87 'Weng ', # 0x88 'Kou ', # 0x89 'Dang ', # 0x8a 'Chen ', # 0x8b 'Ke ', # 0x8c 'Sou ', # 0x8d 'Xia ', # 0x8e 'Qiong ', # 0x8f 'Mao ', # 0x90 'Ming ', # 0x91 'Man ', # 0x92 'Shui ', # 0x93 'Ze ', # 0x94 'Zhang ', # 0x95 'Yi ', # 0x96 'Diao ', # 0x97 'Ou ', # 0x98 'Mo ', # 0x99 'Shun ', # 0x9a 'Cong ', # 0x9b 'Lou ', # 0x9c 'Chi ', # 0x9d 'Man ', # 0x9e 'Piao ', # 0x9f 'Cheng ', # 0xa0 'Ji ', # 0xa1 'Meng ', # 0xa2 '[?] ', # 0xa3 'Run ', # 0xa4 'Pie ', # 0xa5 'Xi ', # 0xa6 'Qiao ', # 0xa7 'Pu ', # 0xa8 'Zhu ', # 0xa9 'Deng ', # 0xaa 'Shen ', # 0xab 'Shun ', # 0xac 'Liao ', # 0xad 'Che ', # 0xae 'Xian ', # 0xaf 'Kan ', # 0xb0 'Ye ', # 0xb1 'Xu ', # 0xb2 'Tong ', # 0xb3 'Mou ', # 0xb4 'Lin ', # 0xb5 'Kui ', # 0xb6 'Xian ', # 0xb7 'Ye ', # 0xb8 'Ai ', # 0xb9 'Hui ', # 0xba 'Zhan ', # 0xbb 'Jian ', # 0xbc 'Gu ', # 0xbd 'Zhao ', # 0xbe 'Qu ', # 0xbf 'Wei ', # 0xc0 'Chou ', # 0xc1 'Sao ', # 0xc2 'Ning ', # 0xc3 'Xun ', # 0xc4 'Yao ', # 0xc5 'Huo ', # 0xc6 'Meng ', # 0xc7 'Mian ', # 0xc8 'Bin ', # 0xc9 'Mian ', # 0xca 'Li ', # 0xcb 'Kuang ', # 0xcc 'Jue ', # 0xcd 'Xuan ', # 0xce 'Mian ', # 0xcf 'Huo ', # 0xd0 'Lu ', # 0xd1 'Meng ', # 0xd2 'Long ', # 0xd3 'Guan ', # 0xd4 'Man ', # 0xd5 'Xi ', # 0xd6 'Chu ', # 0xd7 'Tang ', # 0xd8 'Kan ', # 0xd9 'Zhu ', # 0xda 'Mao ', # 0xdb 'Jin ', # 0xdc 'Lin ', # 0xdd 'Yu ', # 0xde 'Shuo ', # 0xdf 'Ce ', # 0xe0 'Jue ', # 0xe1 'Shi ', # 0xe2 'Yi ', # 0xe3 'Shen ', # 0xe4 'Zhi ', # 0xe5 'Hou ', # 0xe6 'Shen ', # 0xe7 'Ying ', # 0xe8 'Ju ', # 0xe9 'Zhou ', # 0xea 'Jiao ', # 0xeb 'Cuo ', # 0xec 'Duan ', # 0xed 'Ai ', # 0xee 'Jiao ', # 0xef 'Zeng ', # 0xf0 'Huo ', # 0xf1 'Bai ', # 0xf2 'Shi ', # 0xf3 'Ding ', # 0xf4 'Qi ', # 0xf5 'Ji ', # 0xf6 'Zi ', # 0xf7 'Gan ', # 0xf8 'Wu ', # 0xf9 'Tuo ', # 0xfa 'Ku ', # 0xfb 'Qiang ', # 0xfc 'Xi ', # 0xfd 'Fan ', # 0xfe 'Kuang ', # 0xff )
mit
pokowaka/atreus-firmware
tmk/tmk_core/tool/mbed/mbed-sdk/workspace_tools/host_tests/host_tests_plugins/module_reset_mps2.py
30
2470
""" mbed SDK Copyright (c) 2011-2013 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os from host_test_plugins import HostTestPluginBase # Note: This plugin is not fully functional, needs improvements class HostTestPluginResetMethod_MPS2(HostTestPluginBase): """ Plugin used to reset ARM_MPS2 platform Supports: reboot.txt - startup from standby state, reboots when in run mode. shutdown.txt - shutdown from run mode. reset.txt - reset FPGA during run mode. """ def touch_file(self, path): """ Touch file and set timestamp to items """ with open(path, 'a'): os.utime(path, None) # Plugin interface name = 'HostTestPluginResetMethod_MPS2' type = 'ResetMethod' capabilities = ['reboot.txt', 'shutdown.txt', 'reset.txt'] required_parameters = ['disk'] def setup(self, *args, **kwargs): """ Prepare / configure plugin to work. This method can receive plugin specific parameters by kwargs and ignore other parameters which may affect other plugins. """ return True def execute(self, capabilitity, *args, **kwargs): """ Executes capability by name. 
Each capability may directly just call some command line program or execute building pythonic function """ result = False if self.check_parameters(capabilitity, *args, **kwargs) is True: if capabilitity == 'reboot.txt': # TODO: Implement touch file for reboot pass elif capabilitity == 'shutdown.txt': # TODO: Implement touch file for shutdown pass elif capabilitity == 'reset.txt': # TODO: Implement touch file for reset pass return result def load_plugin(): """ Returns plugin available in this module """ return HostTestPluginResetMethod_MPS2()
gpl-3.0
eayunstack/horizon
openstack_dashboard/dashboards/project/instances/tables.py
12
40417
# Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from django.conf import settings from django.core import urlresolvers from django.http import HttpResponse # noqa from django import shortcuts from django import template from django.template.defaultfilters import title # noqa from django.utils.http import urlencode from django.utils.translation import npgettext_lazy from django.utils.translation import pgettext_lazy from django.utils.translation import string_concat # noqa from django.utils.translation import ugettext_lazy as _ from django.utils.translation import ungettext_lazy from horizon import conf from horizon import exceptions from horizon import messages from horizon import tables from horizon.templatetags import sizeformat from horizon.utils import filters from openstack_dashboard import api from openstack_dashboard.dashboards.project.access_and_security.floating_ips \ import workflows from openstack_dashboard.dashboards.project.instances import tabs from openstack_dashboard.dashboards.project.instances.workflows \ import resize_instance from openstack_dashboard.dashboards.project.instances.workflows \ import update_instance from openstack_dashboard import policy LOG = logging.getLogger(__name__) ACTIVE_STATES = ("ACTIVE",) VOLUME_ATTACH_READY_STATES = ("ACTIVE", "SHUTOFF") SNAPSHOT_READY_STATES = ("ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED") POWER_STATES = { 0: "NO STATE", 1: "RUNNING", 2: "BLOCKED", 3: "PAUSED", 4: "SHUTDOWN", 
5: "SHUTOFF", 6: "CRASHED", 7: "SUSPENDED", 8: "FAILED", 9: "BUILDING", } PAUSE = 0 UNPAUSE = 1 SUSPEND = 0 RESUME = 1 def is_deleting(instance): task_state = getattr(instance, "OS-EXT-STS:task_state", None) if not task_state: return False return task_state.lower() == "deleting" class TerminateInstance(policy.PolicyTargetMixin, tables.BatchAction): name = "terminate" classes = ("btn-danger",) icon = "remove" policy_rules = (("compute", "compute:delete"),) help_text = _("Terminated instances are not recoverable.") @staticmethod def action_present(count): return ungettext_lazy( u"Terminate Instance", u"Terminate Instances", count ) @staticmethod def action_past(count): return ungettext_lazy( u"Scheduled termination of Instance", u"Scheduled termination of Instances", count ) def allowed(self, request, instance=None): """Allow terminate action if instance not currently being deleted.""" return not is_deleting(instance) def action(self, request, obj_id): api.nova.server_delete(request, obj_id) class RebootInstance(policy.PolicyTargetMixin, tables.BatchAction): name = "reboot" classes = ('btn-danger', 'btn-reboot') policy_rules = (("compute", "compute:reboot"),) help_text = _("Restarted instances will lose any data" " not saved in persistent storage.") @staticmethod def action_present(count): return ungettext_lazy( u"Hard Reboot Instance", u"Hard Reboot Instances", count ) @staticmethod def action_past(count): return ungettext_lazy( u"Hard Rebooted Instance", u"Hard Rebooted Instances", count ) def allowed(self, request, instance=None): if instance is not None: return ((instance.status in ACTIVE_STATES or instance.status == 'SHUTOFF') and not is_deleting(instance)) else: return True def action(self, request, obj_id): api.nova.server_reboot(request, obj_id, soft_reboot=False) class SoftRebootInstance(RebootInstance): name = "soft_reboot" @staticmethod def action_present(count): return ungettext_lazy( u"Soft Reboot Instance", u"Soft Reboot Instances", count ) 
@staticmethod def action_past(count): return ungettext_lazy( u"Soft Rebooted Instance", u"Soft Rebooted Instances", count ) def action(self, request, obj_id): api.nova.server_reboot(request, obj_id, soft_reboot=True) class TogglePause(tables.BatchAction): name = "pause" icon = "pause" @staticmethod def action_present(count): return ( ungettext_lazy( u"Pause Instance", u"Pause Instances", count ), ungettext_lazy( u"Resume Instance", u"Resume Instances", count ), ) @staticmethod def action_past(count): return ( ungettext_lazy( u"Paused Instance", u"Paused Instances", count ), ungettext_lazy( u"Resumed Instance", u"Resumed Instances", count ), ) def allowed(self, request, instance=None): if not api.nova.extension_supported('AdminActions', request): return False if not instance: return False self.paused = instance.status == "PAUSED" if self.paused: self.current_present_action = UNPAUSE policy = (("compute", "compute_extension:admin_actions:unpause"),) else: self.current_present_action = PAUSE policy = (("compute", "compute_extension:admin_actions:pause"),) has_permission = True policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None) if policy_check: has_permission = policy_check( policy, request, target={'project_id': getattr(instance, 'tenant_id', None)}) return (has_permission and (instance.status in ACTIVE_STATES or self.paused) and not is_deleting(instance)) def action(self, request, obj_id): if self.paused: api.nova.server_unpause(request, obj_id) self.current_past_action = UNPAUSE else: api.nova.server_pause(request, obj_id) self.current_past_action = PAUSE class ToggleSuspend(tables.BatchAction): name = "suspend" classes = ("btn-suspend",) @staticmethod def action_present(count): return ( ungettext_lazy( u"Suspend Instance", u"Suspend Instances", count ), ungettext_lazy( u"Resume Instance", u"Resume Instances", count ), ) @staticmethod def action_past(count): return ( ungettext_lazy( u"Suspended Instance", u"Suspended Instances", count ), ungettext_lazy( 
u"Resumed Instance", u"Resumed Instances", count ), ) def allowed(self, request, instance=None): if not api.nova.extension_supported('AdminActions', request): return False if not instance: return False self.suspended = instance.status == "SUSPENDED" if self.suspended: self.current_present_action = RESUME policy = (("compute", "compute_extension:admin_actions:resume"),) else: self.current_present_action = SUSPEND policy = (("compute", "compute_extension:admin_actions:suspend"),) has_permission = True policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None) if policy_check: has_permission = policy_check( policy, request, target={'project_id': getattr(instance, 'tenant_id', None)}) return (has_permission and (instance.status in ACTIVE_STATES or self.suspended) and not is_deleting(instance)) def action(self, request, obj_id): if self.suspended: api.nova.server_resume(request, obj_id) self.current_past_action = RESUME else: api.nova.server_suspend(request, obj_id) self.current_past_action = SUSPEND class LaunchLink(tables.LinkAction): name = "launch" verbose_name = _("Launch Instance") url = "horizon:project:instances:launch" classes = ("ajax-modal", "btn-launch") icon = "cloud-upload" policy_rules = (("compute", "compute:create"),) ajax = True def __init__(self, attrs=None, **kwargs): kwargs['preempt'] = True super(LaunchLink, self).__init__(attrs, **kwargs) def allowed(self, request, datum): try: limits = api.nova.tenant_absolute_limits(request, reserved=True) instances_available = limits['maxTotalInstances'] \ - limits['totalInstancesUsed'] cores_available = limits['maxTotalCores'] \ - limits['totalCoresUsed'] ram_available = limits['maxTotalRAMSize'] - limits['totalRAMUsed'] if instances_available <= 0 or cores_available <= 0 \ or ram_available <= 0: if "disabled" not in self.classes: self.classes = [c for c in self.classes] + ['disabled'] self.verbose_name = string_concat(self.verbose_name, ' ', _("(Quota exceeded)")) else: self.verbose_name = _("Launch 
Instance") classes = [c for c in self.classes if c != "disabled"] self.classes = classes except Exception: LOG.exception("Failed to retrieve quota information") # If we can't get the quota information, leave it to the # API to check when launching return True # The action should always be displayed def single(self, table, request, object_id=None): self.allowed(request, None) return HttpResponse(self.render()) class LaunchLinkNG(LaunchLink): name = "launch-ng" verbose_name = _("Launch Instance NG") ajax = False classes = ("btn-launch") def __init__(self, attrs={ "ng-controller": "LaunchInstanceModalCtrl", "ng-click": "openLaunchInstanceWizard(" + "{successUrl: '/project/instances/'})" }, **kwargs): kwargs['preempt'] = True super(LaunchLink, self).__init__(attrs, **kwargs) def get_link_url(self, datum=None): return "javascript:void(0);" class EditInstance(policy.PolicyTargetMixin, tables.LinkAction): name = "edit" verbose_name = _("Edit Instance") url = "horizon:project:instances:update" classes = ("ajax-modal",) icon = "pencil" policy_rules = (("compute", "compute:update"),) def get_link_url(self, project): return self._get_link_url(project, 'instance_info') def _get_link_url(self, project, step_slug): base_url = urlresolvers.reverse(self.url, args=[project.id]) next_url = self.table.get_full_url() params = {"step": step_slug, update_instance.UpdateInstance.redirect_param_name: next_url} param = urlencode(params) return "?".join([base_url, param]) def allowed(self, request, instance): return not is_deleting(instance) class EditInstanceSecurityGroups(EditInstance): name = "edit_secgroups" verbose_name = _("Edit Security Groups") def get_link_url(self, project): return self._get_link_url(project, 'update_security_groups') def allowed(self, request, instance=None): return (instance.status in ACTIVE_STATES and not is_deleting(instance) and request.user.tenant_id == instance.tenant_id) class CreateSnapshot(policy.PolicyTargetMixin, tables.LinkAction): name = "snapshot" 
verbose_name = _("Create Snapshot") url = "horizon:project:images:snapshots:create" classes = ("ajax-modal",) icon = "camera" policy_rules = (("compute", "compute:snapshot"),) def allowed(self, request, instance=None): return instance.status in SNAPSHOT_READY_STATES \ and not is_deleting(instance) class ConsoleLink(policy.PolicyTargetMixin, tables.LinkAction): name = "console" verbose_name = _("Console") url = "horizon:project:instances:detail" classes = ("btn-console",) policy_rules = (("compute", "compute_extension:consoles"),) def allowed(self, request, instance=None): # We check if ConsoleLink is allowed only if settings.CONSOLE_TYPE is # not set at all, or if it's set to any value other than None or False. return bool(getattr(settings, 'CONSOLE_TYPE', True)) and \ instance.status in ACTIVE_STATES and not is_deleting(instance) def get_link_url(self, datum): base_url = super(ConsoleLink, self).get_link_url(datum) tab_query_string = tabs.ConsoleTab( tabs.InstanceDetailTabs).get_query_string() return "?".join([base_url, tab_query_string]) class LogLink(policy.PolicyTargetMixin, tables.LinkAction): name = "log" verbose_name = _("View Log") url = "horizon:project:instances:detail" classes = ("btn-log",) policy_rules = (("compute", "compute_extension:console_output"),) def allowed(self, request, instance=None): return instance.status in ACTIVE_STATES and not is_deleting(instance) def get_link_url(self, datum): base_url = super(LogLink, self).get_link_url(datum) tab_query_string = tabs.LogTab( tabs.InstanceDetailTabs).get_query_string() return "?".join([base_url, tab_query_string]) class ResizeLink(policy.PolicyTargetMixin, tables.LinkAction): name = "resize" verbose_name = _("Resize Instance") url = "horizon:project:instances:resize" classes = ("ajax-modal", "btn-resize") policy_rules = (("compute", "compute:resize"),) def get_link_url(self, project): return self._get_link_url(project, 'flavor_choice') def _get_link_url(self, project, step_slug): base_url = 
urlresolvers.reverse(self.url, args=[project.id]) next_url = self.table.get_full_url() params = {"step": step_slug, resize_instance.ResizeInstance.redirect_param_name: next_url} param = urlencode(params) return "?".join([base_url, param]) def allowed(self, request, instance): return ((instance.status in ACTIVE_STATES or instance.status == 'SHUTOFF') and not is_deleting(instance)) class ConfirmResize(policy.PolicyTargetMixin, tables.Action): name = "confirm" verbose_name = _("Confirm Resize/Migrate") classes = ("btn-confirm", "btn-action-required") policy_rules = (("compute", "compute:confirm_resize"),) def allowed(self, request, instance): return instance.status == 'VERIFY_RESIZE' def single(self, table, request, instance): api.nova.server_confirm_resize(request, instance) class RevertResize(policy.PolicyTargetMixin, tables.Action): name = "revert" verbose_name = _("Revert Resize/Migrate") classes = ("btn-revert", "btn-action-required") policy_rules = (("compute", "compute:revert_resize"),) def allowed(self, request, instance): return instance.status == 'VERIFY_RESIZE' def single(self, table, request, instance): api.nova.server_revert_resize(request, instance) class RebuildInstance(policy.PolicyTargetMixin, tables.LinkAction): name = "rebuild" verbose_name = _("Rebuild Instance") classes = ("btn-rebuild", "ajax-modal") url = "horizon:project:instances:rebuild" policy_rules = (("compute", "compute:rebuild"),) def allowed(self, request, instance): return ((instance.status in ACTIVE_STATES or instance.status == 'SHUTOFF') and not is_deleting(instance)) def get_link_url(self, datum): instance_id = self.table.get_object_id(datum) return urlresolvers.reverse(self.url, args=[instance_id]) class DecryptInstancePassword(tables.LinkAction): name = "decryptpassword" verbose_name = _("Retrieve Password") classes = ("btn-decrypt", "ajax-modal") url = "horizon:project:instances:decryptpassword" def allowed(self, request, instance): enable = getattr(settings, 
'OPENSTACK_ENABLE_PASSWORD_RETRIEVE', False) return (enable and (instance.status in ACTIVE_STATES or instance.status == 'SHUTOFF') and not is_deleting(instance) and get_keyname(instance) is not None) def get_link_url(self, datum): instance_id = self.table.get_object_id(datum) keypair_name = get_keyname(datum) return urlresolvers.reverse(self.url, args=[instance_id, keypair_name]) class AssociateIP(policy.PolicyTargetMixin, tables.LinkAction): name = "associate" verbose_name = _("Associate Floating IP") url = "horizon:project:access_and_security:floating_ips:associate" classes = ("ajax-modal",) icon = "link" policy_rules = (("compute", "network:associate_floating_ip"),) def allowed(self, request, instance): if not api.network.floating_ip_supported(request): return False if api.network.floating_ip_simple_associate_supported(request): return False if instance.status == "ERROR": return False return not is_deleting(instance) def get_link_url(self, datum): base_url = urlresolvers.reverse(self.url) next_url = self.table.get_full_url() params = { "instance_id": self.table.get_object_id(datum), workflows.IPAssociationWorkflow.redirect_param_name: next_url} params = urlencode(params) return "?".join([base_url, params]) class SimpleAssociateIP(policy.PolicyTargetMixin, tables.Action): name = "associate-simple" verbose_name = _("Associate Floating IP") icon = "link" policy_rules = (("compute", "network:associate_floating_ip"),) def allowed(self, request, instance): if not api.network.floating_ip_simple_associate_supported(request): return False if instance.status == "ERROR": return False return not is_deleting(instance) def single(self, table, request, instance_id): try: # target_id is port_id for Neutron and instance_id for Nova Network # (Neutron API wrapper returns a 'portid_fixedip' string) target_id = api.network.floating_ip_target_get_by_instance( request, instance_id).split('_')[0] fip = api.network.tenant_floating_ip_allocate(request) 
api.network.floating_ip_associate(request, fip.id, target_id) messages.success(request, _("Successfully associated floating IP: %s") % fip.ip) except Exception: exceptions.handle(request, _("Unable to associate floating IP.")) return shortcuts.redirect(request.get_full_path()) class SimpleDisassociateIP(policy.PolicyTargetMixin, tables.Action): name = "disassociate" verbose_name = _("Disassociate Floating IP") classes = ("btn-danger", "btn-disassociate",) policy_rules = (("compute", "network:disassociate_floating_ip"),) def allowed(self, request, instance): if not api.network.floating_ip_supported(request): return False if not conf.HORIZON_CONFIG["simple_ip_management"]: return False return not is_deleting(instance) def single(self, table, request, instance_id): try: # target_id is port_id for Neutron and instance_id for Nova Network # (Neutron API wrapper returns a 'portid_fixedip' string) targets = api.network.floating_ip_target_list_by_instance( request, instance_id) target_ids = [t.split('_')[0] for t in targets] fips = [fip for fip in api.network.tenant_floating_ip_list(request) if fip.port_id in target_ids] # Removing multiple floating IPs at once doesn't work, so this pops # off the first one. if fips: fip = fips.pop() api.network.floating_ip_disassociate(request, fip.id) messages.success(request, _("Successfully disassociated " "floating IP: %s") % fip.ip) else: messages.info(request, _("No floating IPs to disassociate.")) except Exception: exceptions.handle(request, _("Unable to disassociate floating IP.")) return shortcuts.redirect(request.get_full_path()) def instance_fault_to_friendly_message(instance): fault = getattr(instance, 'fault', {}) message = fault.get('message', _("Unknown")) default_message = _("Please try again later [Error: %s].") % message fault_map = { 'NoValidHost': _("There is not enough capacity for this " "flavor in the selected availability zone. 
" "Try again later or select a different availability " "zone.") } return fault_map.get(message, default_message) def get_instance_error(instance): if instance.status.lower() != 'error': return None message = instance_fault_to_friendly_message(instance) preamble = _('Failed to perform requested operation on instance "%s", the ' 'instance has an error status') % instance.name or instance.id message = string_concat(preamble, ': ', message) return message class UpdateRow(tables.Row): ajax = True def get_data(self, request, instance_id): instance = api.nova.server_get(request, instance_id) try: instance.full_flavor = api.nova.flavor_get(request, instance.flavor["id"]) except Exception: exceptions.handle(request, _('Unable to retrieve flavor information ' 'for instance "%s".') % instance_id, ignore=True) error = get_instance_error(instance) if error: messages.error(request, error) return instance class StartInstance(policy.PolicyTargetMixin, tables.BatchAction): name = "start" classes = ('btn-confirm',) policy_rules = (("compute", "compute:start"),) @staticmethod def action_present(count): return ungettext_lazy( u"Start Instance", u"Start Instances", count ) @staticmethod def action_past(count): return ungettext_lazy( u"Started Instance", u"Started Instances", count ) def allowed(self, request, instance): return ((instance is None) or (instance.status in ("SHUTDOWN", "SHUTOFF", "CRASHED"))) def action(self, request, obj_id): api.nova.server_start(request, obj_id) class StopInstance(policy.PolicyTargetMixin, tables.BatchAction): name = "stop" classes = ('btn-danger',) policy_rules = (("compute", "compute:stop"),) help_text = _("To power off a specific instance.") @staticmethod def action_present(count): return npgettext_lazy( "Action to perform (the instance is currently running)", u"Shut Off Instance", u"Shut Off Instances", count ) @staticmethod def action_past(count): return npgettext_lazy( "Past action (the instance is currently already Shut Off)", u"Shut Off 
Instance", u"Shut Off Instances", count ) def allowed(self, request, instance): return ((instance is None) or ((get_power_state(instance) in ("RUNNING", "SUSPENDED")) and not is_deleting(instance))) def action(self, request, obj_id): api.nova.server_stop(request, obj_id) class LockInstance(policy.PolicyTargetMixin, tables.BatchAction): name = "lock" policy_rules = (("compute", "compute_extension:admin_actions:lock"),) @staticmethod def action_present(count): return ungettext_lazy( u"Lock Instance", u"Lock Instances", count ) @staticmethod def action_past(count): return ungettext_lazy( u"Locked Instance", u"Locked Instances", count ) # TODO(akrivoka): When the lock status is added to nova, revisit this # to only allow unlocked instances to be locked def allowed(self, request, instance): if not api.nova.extension_supported('AdminActions', request): return False return True def action(self, request, obj_id): api.nova.server_lock(request, obj_id) class UnlockInstance(policy.PolicyTargetMixin, tables.BatchAction): name = "unlock" policy_rules = (("compute", "compute_extension:admin_actions:unlock"),) @staticmethod def action_present(count): return ungettext_lazy( u"Unlock Instance", u"Unlock Instances", count ) @staticmethod def action_past(count): return ungettext_lazy( u"Unlocked Instance", u"Unlocked Instances", count ) # TODO(akrivoka): When the lock status is added to nova, revisit this # to only allow locked instances to be unlocked def allowed(self, request, instance): if not api.nova.extension_supported('AdminActions', request): return False return True def action(self, request, obj_id): api.nova.server_unlock(request, obj_id) def get_ips(instance): template_name = 'project/instances/_instance_ips.html' ip_groups = {} for ip_group, addresses in instance.addresses.iteritems(): ip_groups[ip_group] = {} ip_groups[ip_group]["floating"] = [] ip_groups[ip_group]["non_floating"] = [] for address in addresses: if ('OS-EXT-IPS:type' in address and 
address['OS-EXT-IPS:type'] == "floating"): ip_groups[ip_group]["floating"].append(address) else: ip_groups[ip_group]["non_floating"].append(address) context = { "ip_groups": ip_groups, } return template.loader.render_to_string(template_name, context) def get_size(instance): if hasattr(instance, "full_flavor"): template_name = 'project/instances/_instance_flavor.html' size_ram = sizeformat.mb_float_format(instance.full_flavor.ram) if instance.full_flavor.disk > 0: size_disk = sizeformat.diskgbformat(instance.full_flavor.disk) else: size_disk = _("%s GB") % "0" context = { "name": instance.full_flavor.name, "id": instance.id, "size_disk": size_disk, "size_ram": size_ram, "vcpus": instance.full_flavor.vcpus, "flavor_id": instance.full_flavor.id } return template.loader.render_to_string(template_name, context) return _("Not available") def get_keyname(instance): if hasattr(instance, "key_name"): keyname = instance.key_name return keyname return _("Not available") def get_power_state(instance): return POWER_STATES.get(getattr(instance, "OS-EXT-STS:power_state", 0), '') STATUS_DISPLAY_CHOICES = ( ("deleted", pgettext_lazy("Current status of an Instance", u"Deleted")), ("active", pgettext_lazy("Current status of an Instance", u"Active")), ("shutoff", pgettext_lazy("Current status of an Instance", u"Shutoff")), ("suspended", pgettext_lazy("Current status of an Instance", u"Suspended")), ("paused", pgettext_lazy("Current status of an Instance", u"Paused")), ("error", pgettext_lazy("Current status of an Instance", u"Error")), ("resize", pgettext_lazy("Current status of an Instance", u"Resize/Migrate")), ("verify_resize", pgettext_lazy("Current status of an Instance", u"Confirm or Revert Resize/Migrate")), ("revert_resize", pgettext_lazy( "Current status of an Instance", u"Revert Resize/Migrate")), ("reboot", pgettext_lazy("Current status of an Instance", u"Reboot")), ("hard_reboot", pgettext_lazy("Current status of an Instance", u"Hard Reboot")), ("password", 
pgettext_lazy("Current status of an Instance", u"Password")), ("rebuild", pgettext_lazy("Current status of an Instance", u"Rebuild")), ("migrating", pgettext_lazy("Current status of an Instance", u"Migrating")), ("build", pgettext_lazy("Current status of an Instance", u"Build")), ("rescue", pgettext_lazy("Current status of an Instance", u"Rescue")), ("deleted", pgettext_lazy("Current status of an Instance", u"Deleted")), ("soft_deleted", pgettext_lazy("Current status of an Instance", u"Soft Deleted")), ("shelved", pgettext_lazy("Current status of an Instance", u"Shelved")), ("shelved_offloaded", pgettext_lazy("Current status of an Instance", u"Shelved Offloaded")), ) TASK_DISPLAY_NONE = pgettext_lazy("Task status of an Instance", u"None") # Mapping of task states taken from Nova's nova/compute/task_states.py TASK_DISPLAY_CHOICES = ( ("scheduling", pgettext_lazy("Task status of an Instance", u"Scheduling")), ("block_device_mapping", pgettext_lazy("Task status of an Instance", u"Block Device Mapping")), ("networking", pgettext_lazy("Task status of an Instance", u"Networking")), ("spawning", pgettext_lazy("Task status of an Instance", u"Spawning")), ("image_snapshot", pgettext_lazy("Task status of an Instance", u"Snapshotting")), ("image_snapshot_pending", pgettext_lazy("Task status of an Instance", u"Image Snapshot Pending")), ("image_pending_upload", pgettext_lazy("Task status of an Instance", u"Image Pending Upload")), ("image_uploading", pgettext_lazy("Task status of an Instance", u"Image Uploading")), ("image_backup", pgettext_lazy("Task status of an Instance", u"Image Backup")), ("updating_password", pgettext_lazy("Task status of an Instance", u"Updating Password")), ("resize_prep", pgettext_lazy("Task status of an Instance", u"Preparing Resize or Migrate")), ("resize_migrating", pgettext_lazy("Task status of an Instance", u"Resizing or Migrating")), ("resize_migrated", pgettext_lazy("Task status of an Instance", u"Resized or Migrated")), ("resize_finish", 
pgettext_lazy("Task status of an Instance", u"Finishing Resize or Migrate")), ("resize_reverting", pgettext_lazy("Task status of an Instance", u"Reverting Resize or Migrate")), ("resize_confirming", pgettext_lazy("Task status of an Instance", u"Confirming Resize or Migrate")), ("rebooting", pgettext_lazy("Task status of an Instance", u"Rebooting")), ("reboot_pending", pgettext_lazy("Task status of an Instance", u"Reboot Pending")), ("reboot_started", pgettext_lazy("Task status of an Instance", u"Reboot Started")), ("rebooting_hard", pgettext_lazy("Task status of an Instance", u"Rebooting Hard")), ("reboot_pending_hard", pgettext_lazy("Task status of an Instance", u"Reboot Pending Hard")), ("reboot_started_hard", pgettext_lazy("Task status of an Instance", u"Reboot Started Hard")), ("pausing", pgettext_lazy("Task status of an Instance", u"Pausing")), ("unpausing", pgettext_lazy("Task status of an Instance", u"Resuming")), ("suspending", pgettext_lazy("Task status of an Instance", u"Suspending")), ("resuming", pgettext_lazy("Task status of an Instance", u"Resuming")), ("powering-off", pgettext_lazy("Task status of an Instance", u"Powering Off")), ("powering-on", pgettext_lazy("Task status of an Instance", u"Powering On")), ("rescuing", pgettext_lazy("Task status of an Instance", u"Rescuing")), ("unrescuing", pgettext_lazy("Task status of an Instance", u"Unrescuing")), ("rebuilding", pgettext_lazy("Task status of an Instance", u"Rebuilding")), ("rebuild_block_device_mapping", pgettext_lazy( "Task status of an Instance", u"Rebuild Block Device Mapping")), ("rebuild_spawning", pgettext_lazy("Task status of an Instance", u"Rebuild Spawning")), ("migrating", pgettext_lazy("Task status of an Instance", u"Migrating")), ("deleting", pgettext_lazy("Task status of an Instance", u"Deleting")), ("soft-deleting", pgettext_lazy("Task status of an Instance", u"Soft Deleting")), ("restoring", pgettext_lazy("Task status of an Instance", u"Restoring")), ("shelving", 
pgettext_lazy("Task status of an Instance", u"Shelving")), ("shelving_image_pending_upload", pgettext_lazy( "Task status of an Instance", u"Shelving Image Pending Upload")), ("shelving_image_uploading", pgettext_lazy("Task status of an Instance", u"Shelving Image Uploading")), ("shelving_offloading", pgettext_lazy("Task status of an Instance", u"Shelving Offloading")), ("unshelving", pgettext_lazy("Task status of an Instance", u"Unshelving")), ) POWER_DISPLAY_CHOICES = ( ("NO STATE", pgettext_lazy("Power state of an Instance", u"No State")), ("RUNNING", pgettext_lazy("Power state of an Instance", u"Running")), ("BLOCKED", pgettext_lazy("Power state of an Instance", u"Blocked")), ("PAUSED", pgettext_lazy("Power state of an Instance", u"Paused")), ("SHUTDOWN", pgettext_lazy("Power state of an Instance", u"Shut Down")), ("SHUTOFF", pgettext_lazy("Power state of an Instance", u"Shut Off")), ("CRASHED", pgettext_lazy("Power state of an Instance", u"Crashed")), ("SUSPENDED", pgettext_lazy("Power state of an Instance", u"Suspended")), ("FAILED", pgettext_lazy("Power state of an Instance", u"Failed")), ("BUILDING", pgettext_lazy("Power state of an Instance", u"Building")), ) class InstancesFilterAction(tables.FilterAction): filter_type = "server" filter_choices = (('name', _("Instance Name"), True), ('status', _("Status ="), True), ('image', _("Image ID ="), True), ('flavor', _("Flavor ID ="), True)) class InstancesTable(tables.DataTable): TASK_STATUS_CHOICES = ( (None, True), ("none", True) ) STATUS_CHOICES = ( ("active", True), ("shutoff", True), ("suspended", True), ("paused", True), ("error", False), ("rescue", True), ("shelved", True), ("shelved_offloaded", True), ) name = tables.Column("name", link="horizon:project:instances:detail", verbose_name=_("Instance Name")) image_name = tables.Column("image_name", verbose_name=_("Image Name")) ip = tables.Column(get_ips, verbose_name=_("IP Address"), attrs={'data-type': "ip"}) size = tables.Column(get_size, 
verbose_name=_("Size"), attrs={'data-type': 'size'}) keypair = tables.Column(get_keyname, verbose_name=_("Key Pair")) status = tables.Column("status", filters=(title, filters.replace_underscores), verbose_name=_("Status"), status=True, status_choices=STATUS_CHOICES, display_choices=STATUS_DISPLAY_CHOICES) az = tables.Column("availability_zone", verbose_name=_("Availability Zone")) task = tables.Column("OS-EXT-STS:task_state", verbose_name=_("Task"), empty_value=TASK_DISPLAY_NONE, status=True, status_choices=TASK_STATUS_CHOICES, display_choices=TASK_DISPLAY_CHOICES) state = tables.Column(get_power_state, filters=(title, filters.replace_underscores), verbose_name=_("Power State"), display_choices=POWER_DISPLAY_CHOICES) created = tables.Column("created", verbose_name=_("Time since created"), filters=(filters.parse_isotime, filters.timesince_sortable), attrs={'data-type': 'timesince'}) class Meta(object): name = "instances" verbose_name = _("Instances") status_columns = ["status", "task"] row_class = UpdateRow table_actions_menu = (StartInstance, StopInstance, SoftRebootInstance) launch_actions = () if getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', True): launch_actions = (LaunchLink,) + launch_actions if getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', False): launch_actions = (LaunchLinkNG,) + launch_actions table_actions = launch_actions + (TerminateInstance, InstancesFilterAction) row_actions = (StartInstance, ConfirmResize, RevertResize, CreateSnapshot, SimpleAssociateIP, AssociateIP, SimpleDisassociateIP, EditInstance, DecryptInstancePassword, EditInstanceSecurityGroups, ConsoleLink, LogLink, TogglePause, ToggleSuspend, ResizeLink, LockInstance, UnlockInstance, SoftRebootInstance, RebootInstance, StopInstance, RebuildInstance, TerminateInstance)
apache-2.0
franziz/arcrawler
lib/saver/blog.py
1
1508
from curtsies import fmtstr
from ..monitor import Monitor
import pymongo
import bson.json_util
import re
import arrow


class BlogSaver:
    """Persists crawled blog articles into the "blog_crawler" MongoDB database."""

    def __init__(self, **kwargs):
        # NOTE(review): these two settings are stored but never used below --
        # save() connects to a hardcoded address and database instead.
        # Presumably save() was meant to honour them; confirm with callers
        # before relying on either kwarg.
        self.db_address = kwargs.get("db_address", "mongo:27017")
        self.db_name = kwargs.get("db_name", "news_crawler")

    def save(self, article=None):
        """
        Insert one crawled article document into MongoDB.

        The article mapping must contain a "_crawled_by" key (reported to the
        Monitor) and is expected to carry a unique "permalink" (enforced by a
        unique index); a duplicate permalink is reported and skipped, not
        raised to the caller.

        Exceptions:
        - AssertionError
        """
        assert article is not None, "article is not defined."

        # monitor_conn = pymongo.MongoClient("mongodb://mongo:27017/monitor")
        # monitor_db = monitor_conn["monitor"]
        monitor = Monitor()

        conn = pymongo.MongoClient("mongodb://mongo:27017/blog_crawler")
        db = conn["blog_crawler"]

        # Ensuring index
        # create_index() is idempotent: it is a no-op when the index exists.
        db.data.create_index([("permalink", pymongo.ASCENDING)], unique=True, background=True)
        db.data.create_index([("converted", pymongo.ASCENDING)], background=True)
        # TTL index: documents expire 2592000 s (30 days) after the value
        # stored in their "TTL" field.
        db.data.create_index("TTL", expireAfterSeconds=2592000, background=True)

        try:
            db.data.insert_one(article)
            monitor.capture_insert_document(article["_crawled_by"])
            # monitor_db.status.update(
            #     {"crawler_name": re.compile(article["_crawled_by"], re.IGNORECASE)},
            #     {"$set":{
            #         "crawler_name": article["_crawled_by"].title(),
            #         "last_insert_time": arrow.utcnow().datetime
            #     }},
            #     upsert=True
            # )
            print(fmtstr("[BlogSaver][success] Inserted One Document!"))
        except pymongo.errors.DuplicateKeyError:
            # A document with the same permalink is already stored.
            print(fmtstr("[BlogSaver][error] Duplicate Document!","red"))
        finally:
            # Always release the connection, even on failure.
            conn.close()
            # monitor_conn.close()
gpl-3.0
crdoconnor/olympia
lib/crypto/tasks.py
8
14127
import json import logging import os import shutil import zipfile from django.conf import settings from django.db.models import Q from lxml import etree import amo from addons.models import AddonUser from amo.celery import task from lib.crypto.packaged import sign_file from versions.compare import version_int from versions.models import Version log = logging.getLogger('z.task') MAIL_SUBJECT = u'Mozilla Add-ons: {addon} has been automatically signed on AMO' MAIL_MESSAGE = u""" Your add-on, {addon}, has been automatically signed for distribution in upcoming versions of Firefox. The signing process involved repackaging the add-on files and adding the string '.1-signed' to their versions numbers. The new versions have kept their review status and are now available for your users. We recommend that you give them a try to make sure they don't have any unexpected problems: {addon_url} If you are unfamiliar with the extension signing requirement, please read the following documents: * Signing announcement: http://blog.mozilla.org/addons/2015/02/10/extension-signing-safer-experience/ * Documentation page and FAQ: https://wiki.mozilla.org/Addons/Extension_Signing If you have any questions or comments on this, please reply to this email or join #amo-editors on irc.mozilla.org. You're receiving this email because you have an add-on hosted on https://addons.mozilla.org """ MAIL_UNSIGN_SUBJECT = u'Mozilla Add-ons: {addon} has been unsigned/reverted' MAIL_UNSIGN_MESSAGE = u""" Your add-on, {addon}, was automatically signed for distribution in upcoming versions of Firefox. However, we encountered an issue with older versions of Firefox, and had to revert this signature. We restored the backups we had for the signed versions. 
We recommend that you give them a try to make sure they don't have any unexpected problems: {addon_url} Link to the bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1158467 If you have any questions or comments on this, please reply to this email or join #amo-editors on irc.mozilla.org. You're receiving this email because you have an add-on hosted on https://addons.mozilla.org and we had automatically signed it. """ @task def sign_addons(addon_ids, force=False, **kw): """Used to sign all the versions of an addon. This is used in the 'sign_addons' and 'process_addons --task sign_addons' management commands. It also bumps the version number of the file and the Version, so the Firefox extension update mecanism picks this new signed version and installs it. """ log.info(u'[{0}] Signing addons.'.format(len(addon_ids))) def file_supports_firefox(version): """Return a Q object: files supporting at least a firefox version.""" return Q(version__apps__max__application=amo.FIREFOX.id, version__apps__max__version_int__gte=version_int(version)) is_default_compatible = Q(binary_components=False, strict_compatibility=False) # We only want to sign files that are at least compatible with Firefox # MIN_D2C_VERSION, or Firefox MIN_NOT_D2C_VERSION if they are not default # to compatible. # The signing feature should be supported from Firefox 40 and above, but # we're still signing some files that are a bit older just in case. ff_version_filter = ( (is_default_compatible & file_supports_firefox(settings.MIN_D2C_VERSION)) | (~is_default_compatible & file_supports_firefox(settings.MIN_NOT_D2C_VERSION))) addons_emailed = [] # We only care about extensions. for version in Version.objects.filter(addon_id__in=addon_ids, addon__type=amo.ADDON_EXTENSION): # We only sign files that have been reviewed and are compatible with # versions of Firefox that are recent enough. 
to_sign = version.files.filter(ff_version_filter, status__in=amo.REVIEWED_STATUSES) if force: to_sign = to_sign.all() else: to_sign = to_sign.filter(is_signed=False) if not to_sign: log.info(u'Not signing addon {0}, version {1} (no files or already' u' signed)'.format(version.addon, version)) continue log.info(u'Signing addon {0}, version {1}'.format(version.addon, version)) bump_version = False # Did we sign at least one file? for file_obj in to_sign: if not os.path.isfile(file_obj.file_path): log.info(u'File {0} does not exist, skip'.format(file_obj.pk)) continue # Save the original file, before bumping the version. backup_path = u'{0}.backup_signature'.format(file_obj.file_path) shutil.copy(file_obj.file_path, backup_path) try: # Need to bump the version (modify install.rdf or package.json) # before the file is signed. bump_version_number(file_obj) if file_obj.status == amo.STATUS_PUBLIC: server = settings.SIGNING_SERVER else: server = settings.PRELIMINARY_SIGNING_SERVER signed = bool(sign_file(file_obj, server)) if signed: # Bump the version number if at least one signed. bump_version = True else: # We didn't sign, so revert the version bump. shutil.move(backup_path, file_obj.file_path) except: log.error(u'Failed signing file {0}'.format(file_obj.pk), exc_info=True) # Revert the version bump, restore the backup. shutil.move(backup_path, file_obj.file_path) # Now update the Version model, if we signed at least one file. if bump_version: bumped_version = _dot_one(version.version) version.update(version=bumped_version, version_int=version_int(bumped_version)) addon = version.addon if addon.pk not in addons_emailed: # Send a mail to the owners/devs warning them we've # automatically signed their addon. 
qs = (AddonUser.objects .filter(role=amo.AUTHOR_ROLE_OWNER, addon=addon) .exclude(user__email=None)) emails = qs.values_list('user__email', flat=True) subject = MAIL_SUBJECT.format(addon=addon.name) message = MAIL_MESSAGE.format( addon=addon.name, addon_url=amo.helpers.absolutify( addon.get_dev_url(action='versions'))) amo.utils.send_mail( subject, message, recipient_list=emails, fail_silently=True, headers={'Reply-To': 'amo-editors@mozilla.org'}) addons_emailed.append(addon.pk) def bump_version_number(file_obj): """Add a '.1-signed' to the version number.""" # Create a new xpi with the bumped version. bumped = u'{0}.bumped'.format(file_obj.file_path) # Copy the original XPI, with the updated install.rdf or package.json. with zipfile.ZipFile(file_obj.file_path, 'r') as source: file_list = source.infolist() with zipfile.ZipFile(bumped, 'w', zipfile.ZIP_DEFLATED) as dest: for file_ in file_list: content = source.read(file_.filename) if file_.filename == 'install.rdf': content = _bump_version_in_install_rdf(content) if file_.filename == 'package.json': content = _bump_version_in_package_json(content) dest.writestr(file_, content) # Move the bumped file to the original file. shutil.move(bumped, file_obj.file_path) def _dot_one(version): """Returns the version with an appended '.1-signed' on it.""" return u'{0}.1-signed'.format(version) def _bump_version_in_install_rdf(content): """Add a '.1-signed' to the version number in the install.rdf provided.""" # We need to use an XML parser, and not a RDF parser, because our # install.rdf files aren't really standard (they use default namespaces, # don't namespace the "about" attribute... rdflib can parse them, and can # now even serialize them, but the end result could be very different from # the format we need. 
tree = etree.fromstring(content) # There's two different formats for the install.rdf: the "standard" one # uses nodes for each item (like <em:version>1.2</em:version>), the other # alternate one sets attributes on the <RDF:Description # RDF:about="urn:mozilla:install-manifest"> element. # Get the version node, if it's the common format, or the Description node # that has the "em:version" attribute if it's the alternate format. namespace = 'http://www.mozilla.org/2004/em-rdf#' version_uri = '{{{0}}}version'.format(namespace) for node in tree.xpath('//em:version | //*[@em:version]', namespaces={'em': namespace}): if node.tag == version_uri: # Common format, version is a node. node.text = _dot_one(node.text) else: # Alternate format, version is an attribute. node.set(version_uri, _dot_one(node.get(version_uri))) return etree.tostring(tree, xml_declaration=True, encoding='utf-8') def _bump_version_in_package_json(content): """Add a '.1-signed' to the version number in the package.json provided.""" bumped = json.loads(content) if 'version' in bumped: bumped['version'] = _dot_one(bumped['version']) return json.dumps(bumped) @task def unsign_addons(addon_ids, force=False, **kw): """Used to unsign all the versions of an addon that were previously signed. This is used to revert the signing in case we need to. It first moves the backup of the signed file back over its original one, then un-bump the version, and finally re-hash the file. 
""" log.info(u'[{0}] Unsigning addons.'.format(len(addon_ids))) bumped_suffix = u'.1-signed' def file_supports_firefox(version): """Return a Q object: files supporting at least a firefox version.""" return Q(version__apps__max__application=amo.FIREFOX.id, version__apps__max__version_int__gte=version_int(version)) is_default_compatible = Q(binary_components=False, strict_compatibility=False) # We only want to unsign files that are at least compatible with Firefox # MIN_D2C_VERSION, or Firefox MIN_NOT_D2C_VERSION if they are not default # to compatible. # The signing feature should be supported from Firefox 40 and above, but # we're still signing some files that are a bit older just in case. ff_version_filter = ( (is_default_compatible & file_supports_firefox(settings.MIN_D2C_VERSION)) | (~is_default_compatible & file_supports_firefox(settings.MIN_NOT_D2C_VERSION))) addons_emailed = [] # We only care about extensions. for version in Version.objects.filter(addon_id__in=addon_ids, addon__type=amo.ADDON_EXTENSION): # We only unsign files that have been reviewed and are compatible with # versions of Firefox that are recent enough. if not version.version.endswith(bumped_suffix): log.info(u'Version {0} was not bumped, skip.'.format(version.pk)) continue to_unsign = version.files.filter(ff_version_filter, status__in=amo.REVIEWED_STATUSES) if force: to_unsign = to_unsign.all() else: to_unsign = to_unsign.filter(is_signed=False) if not to_unsign: log.info(u'Not unsigning addon {0}, version {1} (no files or not ' u'signed)'.format(version.addon, version)) continue log.info(u'Unsigning addon {0}, version {1}'.format(version.addon, version)) for file_obj in to_unsign: if not os.path.isfile(file_obj.file_path): log.info(u'File {0} does not exist, skip'.format(file_obj.pk)) continue backup_path = u'{0}.backup_signature'.format(file_obj.file_path) if not os.path.isfile(backup_path): log.info(u'Backup {0} does not exist, skip'.format( backup_path)) continue # Restore the backup. 
shutil.move(backup_path, file_obj.file_path) file_obj.update(cert_serial_num='', hash=file_obj.generate_hash()) # Now update the Version model, to unbump its version. unbumped_version = version.version[:-len(bumped_suffix)] version.update(version=unbumped_version, version_int=version_int(unbumped_version)) # Warn addon owners that we restored backups. addon = version.addon if addon.pk not in addons_emailed: # Send a mail to the owners/devs warning them we've # unsigned their addon and restored backups. qs = (AddonUser.objects .filter(role=amo.AUTHOR_ROLE_OWNER, addon=addon) .exclude(user__email=None)) emails = qs.values_list('user__email', flat=True) subject = MAIL_UNSIGN_SUBJECT.format(addon=addon.name) message = MAIL_UNSIGN_MESSAGE.format( addon=addon.name, addon_url=amo.helpers.absolutify( addon.get_dev_url(action='versions'))) amo.utils.send_mail( subject, message, recipient_list=emails, fail_silently=True, headers={'Reply-To': 'amo-editors@mozilla.org'}) addons_emailed.append(addon.pk)
bsd-3-clause
adamreese/kubernetes
examples/selenium/selenium-test.py
497
1089
#!/usr/bin/env python

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities


def check_browser(browser):
    """Smoke-test one browser on the Selenium grid.

    Opens a Remote WebDriver session for ``browser`` (a DesiredCapabilities
    attribute name such as "FIREFOX" or "CHROME") against the selenium-hub
    service, loads google.com and asserts the page came back.

    Raises:
        AssertionError: if the loaded page does not contain "google".
    """
    driver = webdriver.Remote(
        command_executor='http://selenium-hub:4444/wd/hub',
        desired_capabilities=getattr(DesiredCapabilities, browser)
    )
    try:
        driver.get("http://google.com")
        assert "google" in driver.page_source
    finally:
        # Always end the remote session -- even when the assertion fails --
        # so the grid node's browser slot is released. quit() (not close())
        # is required to terminate a Remote session; close() only closes the
        # current window and would leave the session allocated on the hub.
        driver.quit()
    print("Browser %s checks out!" % browser)


check_browser("FIREFOX")
check_browser("CHROME")
apache-2.0
aYukiSekiguchi/ACCESS-Chromium
native_client_sdk/src/build_tools/make_nacl_tools.py
8
7703
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Build NaCl tools (e.g. sel_ldr and ncval) at a given revision."""

import build_utils
import optparse
import os
import shutil
import subprocess
import sys
import tempfile

# Annotator used to run shell commands and print buildbot-style output.
bot = build_utils.BotAnnotator()

# The suffix used for NaCl modules that are installed, such as irt_core.
NEXE_SUFFIX = '.nexe'


def MakeInstallDirs(options):
  '''Create the necessary install directories in the SDK staging area.

  Creates <toolchain>/bin and <toolchain>/runtime if they do not exist yet.
  '''
  install_dir = os.path.join(options.toolchain, 'bin');
  if not os.path.exists(install_dir):
    os.makedirs(install_dir)
  runtime_dir = os.path.join(options.toolchain, 'runtime');
  if not os.path.exists(runtime_dir):
    os.makedirs(runtime_dir)


def Build(options):
  '''Build 32-bit and 64-bit versions of needed NaCL tools and libs.

  Invokes the Native Client scons build (via the shell) for sel_ldr, ncval,
  irt_core (newlib only) and the untrusted libraries/headers.
  '''
  nacl_dir = os.path.join(options.nacl_dir, 'native_client')
  toolchain_option = 'naclsdk_mode=custom:%s' % options.toolchain
  libc_option = '' if options.lib == 'newlib' else ' --nacl_glibc'
  if sys.platform == 'win32':
    scons = os.path.join(nacl_dir, 'scons.bat')
    # On Windows, set up the MSVC environment for each target architecture
    # before invoking scons.
    bits32 = 'vcvarsall.bat x86 && '
    bits64 = 'vcvarsall.bat x86_amd64 && '
  else:
    scons = os.path.join(nacl_dir, 'scons')
    bits32 = ''
    bits64 = ''

  # Build sel_ldr and ncval.
  def BuildTools(prefix, bits, target):
    cmd = '%s%s -j %s --mode=%s platform=x86-%s naclsdk_validate=0 %s %s%s' % (
        prefix, scons, options.jobs, options.variant, bits, target,
        toolchain_option, libc_option)
    bot.Run(cmd, shell=True, cwd=nacl_dir)

  BuildTools(bits32, '32', 'sdl=none sel_ldr ncval')
  BuildTools(bits64, '64', 'sdl=none sel_ldr ncval')

  # Build irt_core, which is needed for running .nexes with sel_ldr.
  def BuildIRT(bits):
    cmd = '%s -j %s irt_core --mode=opt-host,nacl platform=x86-%s %s' % (
        scons, options.jobs, bits, toolchain_option)
    bot.Run(cmd, shell=True, cwd=nacl_dir)

  # only build the IRT using the newlib chain.
  # glibc does not support IRT.
  if options.lib == 'newlib':
    BuildIRT(32)
    BuildIRT(64)

  # Build and install untrusted libraries.
  def BuildAndInstallLibsAndHeaders(bits):
    cmd = ('%s install --mode=opt-host,nacl libdir=%s includedir=%s '
           'platform=x86-%s force_sel_ldr=none %s%s') % (
        scons,
        os.path.join(options.toolchain, 'x86_64-nacl',
                     'lib32' if bits == 32 else 'lib'),
        os.path.join(options.toolchain, 'x86_64-nacl', 'include'),
        bits, toolchain_option, libc_option)
    bot.Run(cmd, shell=True, cwd=nacl_dir)

  BuildAndInstallLibsAndHeaders(32)
  BuildAndInstallLibsAndHeaders(64)


def Install(options, tools=[], runtimes=[]):
  '''Install the NaCl tools and runtimes into the SDK staging area.

  Assumes that all necessary artifacts are built into the NaCl
  scons-out/staging directory, and copies them from there into the SDK
  staging area under toolchain.

  Args:
    options: The build options object.  This is populated from command-line
        args at start-up.
    tools: A list of tool names, these should *not* have any executable
        suffix - this utility adds that (e.g. '.exe' on Windows).
    runtimes: A list of IRT runtimes.  These artifacts should *not* have any
        suffix attached - this utility adds the '.nexe' suffix along with an
        ISA-specific string (e.g. '_x86_32').
  '''
  # NOTE(review): the mutable default arguments above are only iterated, never
  # mutated, so the shared-default pitfall does not bite here.
  # TODO(bradnelson): add an 'install' alias to the main build for this.
  nacl_dir = os.path.join(options.nacl_dir, 'native_client')
  tool_build_path_32 = os.path.join(nacl_dir,
                                    'scons-out',
                                    '%s-x86-32' % (options.variant),
                                    'staging')
  tool_build_path_64 = os.path.join(nacl_dir,
                                    'scons-out',
                                    '%s-x86-64' % (options.variant),
                                    'staging')
  # Each tool is copied twice, once per ISA, with an arch-specific name.
  for nacl_tool in tools:
    shutil.copy(os.path.join(tool_build_path_32,
                             '%s%s' % (nacl_tool, options.exe_suffix)),
                os.path.join(options.toolchain,
                             'bin',
                             '%s_x86_32%s' % (nacl_tool, options.exe_suffix)))
    shutil.copy(os.path.join(tool_build_path_64,
                             '%s%s' % (nacl_tool, options.exe_suffix)),
                os.path.join(options.toolchain,
                             'bin',
                             '%s_x86_64%s' % (nacl_tool, options.exe_suffix)))

  irt_build_path_32 = os.path.join(nacl_dir,
                                   'scons-out',
                                   'nacl_irt-x86-32',
                                   'staging')
  irt_build_path_64 = os.path.join(nacl_dir,
                                   'scons-out',
                                   'nacl_irt-x86-64',
                                   'staging')
  for nacl_irt in runtimes:
    shutil.copy(os.path.join(irt_build_path_32,
                             '%s%s' % (nacl_irt, NEXE_SUFFIX)),
                os.path.join(options.toolchain,
                             'runtime',
                             '%s_x86_32%s' % (nacl_irt, NEXE_SUFFIX)))
    shutil.copy(os.path.join(irt_build_path_64,
                             '%s%s' % (nacl_irt, NEXE_SUFFIX)),
                os.path.join(options.toolchain,
                             'runtime',
                             '%s_x86_64%s' % (nacl_irt, NEXE_SUFFIX)))


def BuildNaClTools(options):
  '''Entry point for a single build or clean pass.

  When --clean is given, removes scons-out; otherwise builds sel_ldr, ncval
  and irt_core and installs them into the staging area.  Returns 0.
  '''
  if(options.clean):
    bot.Print('Removing scons-out')
    scons_out = os.path.join(options.nacl_dir, 'native_client', 'scons-out')
    build_utils.CleanDirectory(scons_out)
  else:
    MakeInstallDirs(options)
    Build(options)
    Install(options, tools=['sel_ldr', 'ncval'], runtimes=['irt_core'])
  return 0


def main(argv):
  '''Parse command-line options, derive platform settings, then build.

  Returns a process exit code (0 on success, 1 on bad arguments).
  '''
  if sys.platform in ['win32', 'cygwin']:
    exe_suffix = '.exe'
  else:
    exe_suffix = ''

  script_dir = os.path.abspath(os.path.dirname(__file__))

  parser = optparse.OptionParser()
  parser.add_option(
      '-t', '--toolchain', dest='toolchain',
      default='toolchain',
      help='where to put the NaCl tool binaries')
  parser.add_option(
      '-l', '--lib', dest='lib',
      default='newlib',
      help='whether to build against newlib (default) or glibc')
  parser.add_option(
      '-c', '--clean', action='store_true', dest='clean',
      default=False,
      help='whether to clean up the checkout files')
  parser.add_option(
      '-j', '--jobs', dest='jobs', default='1',
      help='Number of parallel jobs to use while building nacl tools')
  parser.add_option(
      '-n', '--nacl_dir', dest='nacl_dir',
      default=os.path.join(script_dir, 'packages', 'native_client'),
      help='Location of Native Client repository used for building tools')
  (options, args) = parser.parse_args(argv)
  if args:
    parser.print_help()
    bot.Print('ERROR: invalid argument(s): %s' % args)
    return 1

  options.toolchain = os.path.abspath(options.toolchain)
  options.exe_suffix = exe_suffix

  # Pick variant.
  if sys.platform in ['win32', 'cygwin']:
    variant = 'dbg-win'
  elif sys.platform == 'darwin':
    variant = 'dbg-mac'
  elif sys.platform in ['linux', 'linux2']:
    variant = 'dbg-linux'
  else:
    # Unsupported host platform.
    assert False
  options.variant = variant

  if options.lib not in ['newlib', 'glibc']:
    bot.Print('ERROR: --lib must either be newlib or glibc')
    return 1

  return BuildNaClTools(options)


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
bsd-3-clause
tempbottle/Nuitka
nuitka/nodes/IndicatorMixins.py
2
2961
#     Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
#     Part of "Nuitka", an optimizing Python compiler that is compatible and
#     integrates with CPython, but also works on its own.
#
#     Licensed under the Apache License, Version 2.0 (the "License");
#     you may not use this file except in compliance with the License.
#     You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#     Unless required by applicable law or agreed to in writing, software
#     distributed under the License is distributed on an "AS IS" BASIS,
#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#     See the License for the specific language governing permissions and
#     limitations under the License.
#
""" Module for node class mixins that indicate runtime determined node facts.

    These come into play after finalization only. Each mixin carries one or
    more flags with a default, set at most once during analysis, and then
    queried through the accessor methods below.
"""


class MarkLocalsDictIndicator:
    """ Mixin flagging nodes whose frame requires a locals dictionary. """

    def __init__(self):
        # Off by default; flipped on permanently via markAsLocalsDict().
        self.needs_locals_dict = False

    def hasLocalsDict(self):
        return self.needs_locals_dict

    def markAsLocalsDict(self):
        self.needs_locals_dict = True


class MarkGeneratorIndicator:
    """ Mixin flagging functions/lambdas that are generators.

        Also tracks how much generator return handling is required, as a
        level that only ever increases: False/0 (none), 1 (exit only),
        2 (full return value handling).
    """

    def __init__(self):
        self.is_generator = False
        self.needs_generator_return_exit = False

    def markAsGenerator(self):
        self.is_generator = True

    def isGenerator(self):
        return self.is_generator

    def markAsNeedsGeneratorReturnHandling(self, value):
        # Raise the handling level if needed; never lower it.
        if value > self.needs_generator_return_exit:
            self.needs_generator_return_exit = value

    def needsGeneratorReturnHandling(self):
        # Full return value handling is level 2 only.
        return self.needs_generator_return_exit == 2

    def needsGeneratorReturnExit(self):
        # Any non-zero level requires the return exit.
        return self.needs_generator_return_exit != 0


class MarkUnoptimizedFunctionIndicator:
    """ Mixin for indication that a function contains an exec or star import.

        These do not access global variables directly, but check a locals
        dictionary first, because they do.
    """

    def __init__(self):
        self.unoptimized_locals = False
        self.unqualified_exec = False
        self.exec_source_ref = None

    def markAsExecContaining(self):
        self.unoptimized_locals = True

    # A star import disables local variable optimization just like an exec.
    markAsStarImportContaining = markAsExecContaining

    def markAsUnqualifiedExecContaining(self, source_ref):
        self.unqualified_exec = True

        # Remember only the first occurrence.
        if self.exec_source_ref is None:
            self.exec_source_ref = source_ref

    def isUnoptimized(self):
        return self.unoptimized_locals

    def isUnqualifiedExec(self):
        return self.unqualified_exec and self.unoptimized_locals

    def getExecSourceRef(self):
        return self.exec_source_ref
apache-2.0
UDST/choicemodels
tests/test_mnl_new.py
1
3024
"""
These are tests for the refactored choicemodels MNL codebase.

Fixtures build a small synthetic choice setting (100 observations choosing
among 5 alternatives); comparison tests only run when the legacy
urbansim.urbanchoice implementation is importable.
"""
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from patsy import dmatrix

from choicemodels import MultinomialLogit
from choicemodels.tools import MergedChoiceTable


@pytest.fixture
def obs():
    # 100 observations, each with a random covariate and a random choice
    # among the 5 alternative ids (0..4), indexed by observation id.
    d1 = {'oid': np.arange(100),
          'obsval': np.random.random(100),
          'choice': np.random.choice(np.arange(5), size=100)}
    return pd.DataFrame(d1).set_index('oid')


@pytest.fixture
def alts():
    # 5 alternatives, each with one random attribute, indexed by alt id.
    d2 = {'aid': np.arange(5),
          'altval': np.random.random(5)}
    return pd.DataFrame(d2).set_index('aid')


def test_mnl(obs, alts):
    """
    Confirm that MNL estimation runs, using the native estimator.
    """
    model_expression = 'obsval + altval - 1'
    mct = MergedChoiceTable(obs, alts, 'choice')
    m = MultinomialLogit(mct, model_expression)
    print(m.fit())


def test_mnl_estimation(obs, alts):
    """
    Confirm that estimated params from the new interface match
    urbansim.urbanchoice. Only runs if the urbansim package has been
    installed.
    """
    try:
        from urbansim.urbanchoice.mnl import mnl_estimate
    except:
        # Soft skip: urbansim is an optional comparison dependency.
        print("Comparison of MNL estimation results skipped because urbansim is not installed")
        return

    model_expression = 'obsval + altval - 1'
    mct = MergedChoiceTable(obs, alts, 'choice')

    # new interface
    m = MultinomialLogit(mct, model_expression)
    r = m.fit().get_raw_results()

    # old interface: build the design matrix and chosen-alternative
    # indicator matrix (100 observations x 5 alternatives) by hand.
    dm = dmatrix(model_expression, mct.to_frame())
    chosen = np.reshape(mct.to_frame()[mct.choice_col].values, (100, 5))
    log_lik, fit = mnl_estimate(np.array(dm), chosen, numalts=5)

    # Log-likelihoods should agree to a relative tolerance of 1e-5.
    for k,v in log_lik.items():
        assert(v == pytest.approx(r['log_likelihood'][k], 0.00001))

    assert_frame_equal(fit, r['fit_parameters'][['Coefficient',
                                                 'Std. Error',
                                                 'T-Score']])


def test_mnl_prediction(obs, alts):
    """
    Confirm that fitted probabilities in the new codebase match
    urbansim.urbanchoice. Only runs if the urbansim package has been
    installed.
    """
    try:
        from urbansim.urbanchoice.mnl import mnl_simulate
    except:
        # Soft skip: urbansim is an optional comparison dependency.
        print("Comparison of MNL simulation results skipped because urbansim is not installed")
        return

    # produce a fitted model
    mct = MergedChoiceTable(obs, alts, 'choice', 5)
    m = MultinomialLogit(mct, model_expression='obsval + altval - 1')
    results = m.fit()

    # get predicted probabilities using choicemodels
    probs1 = results.probabilities(mct)

    # compare to probabilities from urbansim.urbanchoice
    dm = dmatrix(results.model_expression, data=mct.to_frame(),
                 return_type='dataframe')

    probs = mnl_simulate(data=dm, coeff=results.fitted_parameters,
                         numalts=mct.sample_size, returnprobs=True)

    # Flatten the (n_obs x n_alts) probability matrix back onto the long
    # merged table so the two series share an index before comparing.
    df = mct.to_frame()
    df['prob'] = probs.flatten()
    probs2 = df.prob

    pd.testing.assert_series_equal(probs1, probs2)
bsd-3-clause
patricksnape/menpo
menpo/image/base.py
2
131364
from typing import Iterable, Optional from warnings import warn import PIL.Image as PILImage import numpy as np from menpo.base import MenpoDeprecationWarning, Vectorizable, copy_landmarks_and_path from menpo.landmark import Landmarkable from menpo.shape import PointCloud, bounding_box from menpo.transform import ( AlignmentUniformScale, Homogeneous, NonUniformScale, Rotation, Translation, scale_about_centre, transform_about_centre, ) from menpo.visualize.base import ImageViewer, LandmarkableViewable, Viewable from .interpolation import scipy_interpolation try: from .interpolation import cv2_perspective_interpolation except ImportError: warn("Falling back to scipy interpolation for affine warps") cv2_perspective_interpolation = None # type: ignore from .patches import ( extract_patches_with_slice, set_patches, extract_patches_by_sampling, ) # Cache the greyscale luminosity coefficients as they are invariant. _greyscale_luminosity_coef: Optional[np.ndarray] = None class ImageBoundaryError(ValueError): r""" Exception that is thrown when an attempt is made to crop an image beyond the edge of it's boundary. Parameters ---------- requested_min : ``(d,)`` `ndarray` The per-dimension minimum index requested for the crop requested_max : ``(d,)`` `ndarray` The per-dimension maximum index requested for the crop snapped_min : ``(d,)`` `ndarray` The per-dimension minimum index that could be used if the crop was constrained to the image boundaries. requested_max : ``(d,)`` `ndarray` The per-dimension maximum index that could be used if the crop was constrained to the image boundaries. """ def __init__(self, requested_min, requested_max, snapped_min, snapped_max): super(ImageBoundaryError, self).__init__() self.requested_min = requested_min self.requested_max = requested_max self.snapped_min = snapped_min self.snapped_max = snapped_max def indices_for_image_of_shape(shape): r""" The indices of all pixels in an image with a given shape (without channel information). 
Parameters ---------- shape : ``(n_dims, n_pixels)`` `ndarray` The shape of the image. Returns ------- indices : `ndarray` The indices of all the pixels in the image. """ return np.indices(shape).reshape([len(shape), -1]).T def normalize_pixels_range(pixels, error_on_unknown_type=True): r""" Normalize the given pixels to the Menpo valid floating point range, [0, 1]. This is a single place to handle normalising pixels ranges. At the moment the supported types are uint8 and uint16. Parameters ---------- pixels : `ndarray` The pixels to normalize in the floating point range. error_on_unknown_type : `bool`, optional If ``True``, this method throws a ``ValueError`` if the given pixels array is an unknown type. If ``False``, this method performs no operation. Returns ------- normalized_pixels : `ndarray` The normalized pixels in the range [0, 1]. Raises ------ ValueError If ``pixels`` is an unknown type and ``error_on_unknown_type==True`` """ dtype = pixels.dtype if dtype == np.uint8: max_range = 255.0 elif dtype == np.uint16: max_range = 65535.0 else: if error_on_unknown_type: raise ValueError( "Unexpected dtype ({}) - normalisation range " "is unknown".format(dtype) ) else: # Do nothing return pixels # This multiplication is quite a bit faster than just dividing - will # automatically cast it up to float64 return pixels * (1.0 / max_range) def denormalize_pixels_range(pixels, out_dtype): """ Denormalize the given pixels array into the range of the given out dtype. If the given pixels are floating point or boolean then the values are scaled appropriately and cast to the output dtype. If the pixels are already the correct dtype they are immediately returned. Floating point pixels must be in the range [0, 1]. Currently uint8 and uint16 output dtypes are supported. Parameters ---------- pixels : `ndarray` The pixels to denormalize. out_dtype : `np.dtype` The numpy data type to output and scale the values into. 
Returns ------- out_pixels : `ndarray` Will be in the correct range and will have type ``out_dtype``. Raises ------ ValueError Pixels are floating point and range outside [0, 1] ValueError Input pixels dtype not in the set {float32, float64, bool}. ValueError Output dtype not in the set {uint8, uint16} """ in_dtype = pixels.dtype if in_dtype == out_dtype: return pixels if np.issubclass_(in_dtype.type, np.floating) or in_dtype == float: if np.issubclass_(out_dtype, np.floating) or out_dtype == float: return pixels.astype(out_dtype) else: p_min = pixels.min() p_max = pixels.max() if p_min < 0.0 or p_max > 1.0: raise ValueError( "Unexpected input range [{}, {}] - pixels " "must be in the range [0, 1]".format(p_min, p_max) ) elif in_dtype != bool: raise ValueError( "Unexpected input dtype ({}) - only float32, float64 " "and bool supported".format(in_dtype) ) if out_dtype == np.uint8: max_range = 255.0 elif out_dtype == np.uint16: max_range = 65535.0 else: raise ValueError( "Unexpected output dtype ({}) - normalisation range " "is unknown".format(out_dtype) ) return (pixels * max_range).astype(out_dtype) def channels_to_back(pixels): r""" Roll the channels from the front to the back for an image. If the image that is passed is already a numpy array, then that is also fine. Always returns a numpy array because our :map:`Image` containers do not support channels at the back. Parameters ---------- image : `ndarray` The pixels or image to roll the channel back for. Returns ------- rolled_pixels : `ndarray` The numpy array of pixels with the channels on the last axis. """ return np.require( np.rollaxis(pixels, 0, pixels.ndim), dtype=pixels.dtype, requirements=["C"] ) def channels_to_front(pixels): r""" Convert the given pixels array (channels assumed to be at the last axis as is common in other imaging packages) into a numpy array. Parameters ---------- pixels : ``(H, W, C)`` `buffer` The pixels to convert to the Menpo channels at axis 0. 
Returns ------- pixels : ``(C, H, W)`` `ndarray` Numpy array, channels as axis 0. """ if not isinstance(pixels, np.ndarray): pixels = np.array(pixels) return np.require(np.rollaxis(pixels, -1), dtype=pixels.dtype, requirements=["C"]) class Image(Vectorizable, Landmarkable, Viewable, LandmarkableViewable): r""" An n-dimensional image. Images are n-dimensional homogeneous regular arrays of data. Each spatially distinct location in the array is referred to as a `pixel`. At a pixel, ``k`` distinct pieces of information can be stored. Each datum at a pixel is refereed to as being in a `channel`. All pixels in the image have the same number of channels, and all channels have the same data-type (`float64`). Parameters ---------- image_data : ``(C, M, N ..., Q)`` `ndarray` Array representing the image pixels, with the first axis being channels. copy : `bool`, optional If ``False``, the ``image_data`` will not be copied on assignment. Note that this will miss out on additional checks. Further note that we still demand that the array is C-contiguous - if it isn't, a copy will be generated anyway. In general, this should only be used if you know what you are doing. Raises ------ Warning If ``copy=False`` cannot be honoured ValueError If the pixel array is malformed """ def __init__(self, image_data, copy=True): super(Image, self).__init__() if not copy: if not image_data.flags.c_contiguous: image_data = np.array(image_data, copy=True, order="C") warn( "The copy flag was NOT honoured. A copy HAS been made. " "Please ensure the data you pass is C-contiguous." 
) else: image_data = np.array(image_data, copy=True, order="C") # Degenerate case whereby we can just put the extra axis # on ourselves if image_data.ndim == 2: # Ensures that the data STAYS C-contiguous image_data = image_data.reshape((1,) + image_data.shape) if image_data.ndim < 2: raise ValueError( "Pixel array has to be 2D (implicitly 1 channel, " "2D shape) or 3D+ (n_channels, 2D+ shape) " " - a {}D array " "was provided".format(image_data.ndim) ) self.pixels = image_data @classmethod def init_blank(cls, shape, n_channels=1, fill=0, dtype=float): r""" Returns a blank image. Parameters ---------- shape : `tuple` or `list` The shape of the image. Any floating point values are rounded up to the nearest integer. n_channels : `int`, optional The number of channels to create the image with. fill : `int`, optional The value to fill all pixels with. dtype : numpy data type, optional The data type of the image. Returns ------- blank_image : :map:`Image` A new image of the requested size. """ # Ensure that the '+' operator means concatenate tuples shape = tuple(np.ceil(shape).astype(int)) if fill == 0: pixels = np.zeros((n_channels,) + shape, dtype=dtype) else: pixels = np.ones((n_channels,) + shape, dtype=dtype) * fill # We know there is no need to copy... return cls(pixels, copy=False) @classmethod def init_from_rolled_channels(cls, pixels): r""" Deprecated - please use the equivalent ``init_from_channels_at_back`` method. """ warn( "This method is no longer supported and will be removed in a " "future version of Menpo. " "Use .init_from_channels_at_back instead.", MenpoDeprecationWarning, ) return cls.init_from_channels_at_back(pixels) @classmethod def init_from_channels_at_back(cls, pixels): r""" Create an Image from a set of pixels where the channels axis is on the last axis (the back). This is common in other frameworks, and therefore this method provides a convenient means of creating a menpo Image from such data. 
Note that a copy is always created due to the need to rearrange the data. Parameters ---------- pixels : ``(M, N ..., Q, C)`` `ndarray` Array representing the image pixels, with the last axis being channels. Returns ------- image : :map:`Image` A new image from the given pixels, with the FIRST axis as the channels. Raises ------ ValueError If image is not at least 2D, i.e. has at least 2 dimensions plus the channels in the end. """ if pixels.ndim == 2: pixels = pixels[..., None] if pixels.ndim < 2: raise ValueError( "Pixel array has to be 2D " "(2D shape, implicitly 1 channel) " "or 3D+ (2D+ shape, n_channels) " " - a {}D array " "was provided".format(pixels.ndim) ) return cls(channels_to_front(pixels)) @classmethod def init_from_pointcloud( cls, pointcloud, group=None, boundary=0, n_channels=1, fill=0, dtype=float, return_transform=False, ): r""" Create an Image that is big enough to contain the given pointcloud. The pointcloud will be translated to the origin and then translated according to its bounds in order to fit inside the new image. An optional boundary can be provided in order to increase the space around the boundary of the pointcloud. The boundary will be added to *all sides of the image* and so a boundary of 5 provides 10 pixels of boundary total for each dimension. Parameters ---------- pointcloud : :map:`PointCloud` Pointcloud to place inside the newly created image. group : `str`, optional If ``None``, the pointcloud will only be used to create the image. If a `str` then the pointcloud will be attached as a landmark group to the image, with the given string as key. boundary : `float` A optional padding distance that is added to the pointcloud bounds. Default is ``0``, meaning the max/min of tightest possible containing image is returned. n_channels : `int`, optional The number of channels to create the image with. fill : `int`, optional The value to fill all pixels with. dtype : numpy data type, optional The data type of the image. 
return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to adjust the PointCloud in order to build the image, is returned. Returns ------- image : ``type(cls)`` Image or subclass A new image with the same size as the given pointcloud, optionally with the pointcloud attached as landmarks. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. """ # Translate pointcloud to the origin minimum = pointcloud.bounds(boundary=boundary)[0] tr = Translation(-minimum) origin_pc = tr.apply(pointcloud) image_shape = origin_pc.range(boundary=boundary) new_image = cls.init_blank( image_shape, n_channels=n_channels, fill=fill, dtype=dtype ) if group is not None: new_image.landmarks[group] = origin_pc if return_transform: return new_image, tr else: return new_image def as_masked(self, mask=None, copy=True): r""" Return a copy of this image with an attached mask behavior. A custom mask may be provided, or ``None``. See the :map:`MaskedImage` constructor for details of how the kwargs will be handled. Parameters ---------- mask : ``(self.shape)`` `ndarray` or :map:`BooleanImage` A mask to attach to the newly generated masked image. copy : `bool`, optional If ``False``, the produced :map:`MaskedImage` will share pixels with ``self``. Only suggested to be used for performance. Returns ------- masked_image : :map:`MaskedImage` An image with the same pixels and landmarks as this one, but with a mask. """ from menpo.image import MaskedImage return copy_landmarks_and_path( self, MaskedImage(self.pixels, mask=mask, copy=copy) ) @property def n_dims(self): r""" The number of dimensions in the image. The minimum possible ``n_dims`` is 2. 
:type: `int` """ return len(self.shape) @property def n_pixels(self): r""" Total number of pixels in the image ``(prod(shape),)`` :type: `int` """ return self.pixels[0, ...].size @property def n_elements(self): r""" Total number of data points in the image ``(prod(shape), n_channels)`` :type: `int` """ return self.pixels.size @property def n_channels(self): """ The number of channels on each pixel in the image. :type: `int` """ return self.pixels.shape[0] @property def width(self): r""" The width of the image. This is the width according to image semantics, and is thus the size of the **last** dimension. :type: `int` """ return self.pixels.shape[-1] @property def height(self): r""" The height of the image. This is the height according to image semantics, and is thus the size of the **second to last** dimension. :type: `int` """ return self.pixels.shape[-2] @property def shape(self): r""" The shape of the image (with ``n_channel`` values at each point). :type: `tuple` """ return self.pixels.shape[1:] def bounds(self): r""" The bounds of the image, minimum is always (0, 0). The maximum is the maximum **index** that can be used to index into the image for each dimension. Therefore, bounds will be of the form: ((0, 0), (self.height - 1, self.width - 1)) for a 2D image. Note that this is akin to supporting a nearest neighbour interpolation. Although the *actual* maximum subpixel value would be something like ``self.height - eps`` where ``eps`` is some value arbitrarily close to 0, this value at least allows sampling without worrying about floating point error. :type: `tuple` """ return (0,) * self.n_dims, tuple(s - 1 for s in self.shape) def diagonal(self): r""" The diagonal size of this image :type: `float` """ return np.sqrt(np.sum(np.array(self.shape) ** 2)) def centre(self): r""" The geometric centre of the Image - the subpixel that is in the middle. Useful for aligning shapes and images. 
:type: (``n_dims``,) `ndarray` """ return np.array(self.shape, dtype=np.double) / 2 def _str_shape(self): if self.n_dims > 2: return " x ".join(str(dim) for dim in self.shape) elif self.n_dims == 2: return "{}W x {}H".format(self.width, self.height) def indices(self): r""" Return the indices of all pixels in this image. :type: (``n_dims``, ``n_pixels``) ndarray """ return indices_for_image_of_shape(self.shape) def _as_vector(self, keep_channels=False): r""" The vectorized form of this image. Parameters ---------- keep_channels : `bool`, optional ========== ============================= Value Return shape ========== ============================= `False` ``(n_channels * n_pixels,)`` `True` ``(n_channels, n_pixels)`` ========== ============================= Returns ------- vec : (See ``keep_channels`` above) `ndarray` Flattened representation of this image, containing all pixel and channel information. """ if keep_channels: return self.pixels.reshape([self.n_channels, -1]) else: return self.pixels.ravel() def from_vector(self, vector, n_channels=None, copy=True): r""" Takes a flattened vector and returns a new image formed by reshaping the vector to the correct pixels and channels. The `n_channels` argument is useful for when we want to add an extra channel to an image but maintain the shape. For example, when calculating the gradient. Note that landmarks are transferred in the process. Parameters ---------- vector : ``(n_parameters,)`` `ndarray` A flattened vector of all pixels and channels of an image. n_channels : `int`, optional If given, will assume that vector is the same shape as this image, but with a possibly different number of channels. copy : `bool`, optional If ``False``, the vector will not be copied in creating the new image. Returns ------- image : :map:`Image` New image of same shape as this image and the number of specified channels. 
Raises ------ Warning If the ``copy=False`` flag cannot be honored """ # This is useful for when we want to add an extra channel to an image # but maintain the shape. For example, when calculating the gradient n_channels = self.n_channels if n_channels is None else n_channels image_data = vector.reshape((n_channels,) + self.shape) new_image = Image(image_data, copy=copy) new_image.landmarks = self.landmarks return new_image def _from_vector_inplace(self, vector, copy=True): r""" Takes a flattened vector and update this image by reshaping the vector to the correct dimensions. Parameters ---------- vector : ``(n_pixels,)`` `bool ndarray` A vector vector of all the pixels of a :map:`BooleanImage`. copy: `bool`, optional If ``False``, the vector will be set as the pixels. If ``True``, a copy of the vector is taken. Raises ------ Warning If ``copy=False`` flag cannot be honored Note ---- For :map:`BooleanImage` this is rebuilding a boolean image **itself** from boolean values. The mask is in no way interpreted in performing the operation, in contrast to :map:`MaskedImage`, where only the masked region is used in :meth:`from_vector_inplace` and :meth:`as_vector`. """ image_data = vector.reshape(self.pixels.shape) if not copy: if not image_data.flags.c_contiguous: warn( "The copy flag was NOT honoured. A copy HAS been made. " "Please ensure the data you pass is C-contiguous." ) image_data = np.array( image_data, copy=True, order="C", dtype=image_data.dtype ) else: image_data = np.array( image_data, copy=True, order="C", dtype=image_data.dtype ) self.pixels = image_data def extract_channels(self, channels): r""" A copy of this image with only the specified channels. Parameters ---------- channels : `int` or `[int]` The channel index or `list` of channel indices to retain. Returns ------- image : `type(self)` A copy of this image with only the channels requested. 
""" copy = self.copy() if not isinstance(channels, list): channels = [channels] # ensure we don't remove the channel axis copy.pixels = self.pixels[channels] return copy def as_histogram(self, keep_channels=True, bins="unique"): r""" Histogram binning of the values of this image. Parameters ---------- keep_channels : `bool`, optional If set to ``False``, it returns a single histogram for all the channels of the image. If set to ``True``, it returns a `list` of histograms, one for each channel. bins : ``{unique}``, positive `int` or sequence of scalars, optional If set equal to ``'unique'``, the bins of the histograms are centred on the unique values of each channel. If set equal to a positive `int`, then this is the number of bins. If set equal to a sequence of scalars, these will be used as bins centres. Returns ------- hist : `ndarray` or `list` with ``n_channels`` `ndarrays` inside The histogram(s). If ``keep_channels=False``, then hist is an `ndarray`. If ``keep_channels=True``, then hist is a `list` with ``len(hist)=n_channels``. bin_edges : `ndarray` or `list` with `n_channels` `ndarrays` inside An array or a list of arrays corresponding to the above histograms that store the bins' edges. Raises ------ ValueError Bins can be either 'unique', positive int or a sequence of scalars. Examples -------- Visualizing the histogram when a list of array bin edges is provided: >>> hist, bin_edges = image.as_histogram() >>> for k in range(len(hist)): >>> plt.subplot(1,len(hist),k) >>> width = 0.7 * (bin_edges[k][1] - bin_edges[k][0]) >>> centre = (bin_edges[k][:-1] + bin_edges[k][1:]) / 2 >>> plt.bar(centre, hist[k], align='center', width=width) """ # parse options if isinstance(bins, str): if bins == "unique": bins = 0 else: raise ValueError( "Bins can be either 'unique', positive int or" "a sequence of scalars." ) elif isinstance(bins, int) and bins < 1: raise ValueError( "Bins can be either 'unique', positive int or a " "sequence of scalars." 
) # compute histogram vec = self.as_vector(keep_channels=keep_channels) if len(vec.shape) == 1 or vec.shape[0] == 1: if bins == 0: bins = np.unique(vec) hist, bin_edges = np.histogram(vec, bins=bins) else: hist = [] bin_edges = [] num_bins = bins for ch in range(vec.shape[0]): if bins == 0: num_bins = np.unique(vec[ch, :]) h_tmp, c_tmp = np.histogram(vec[ch, :], bins=num_bins) hist.append(h_tmp) bin_edges.append(c_tmp) return hist, bin_edges def _view_2d( self, figure_id=None, new_figure=False, channels=None, interpolation="bilinear", cmap_name=None, alpha=1.0, render_axes=False, axes_font_name="sans-serif", axes_font_size=10, axes_font_style="normal", axes_font_weight="normal", axes_x_limits=None, axes_y_limits=None, axes_x_ticks=None, axes_y_ticks=None, figure_size=(7, 7), ): r""" View the image using the default image viewer. This method will appear on the Image as ``view`` if the Image is 2D. Returns ------- figure_id : `object`, optional The id of the figure to be used. new_figure : `bool`, optional If ``True``, a new figure is created. channels : `int` or `list` of `int` or ``all`` or ``None`` If `int` or `list` of `int`, the specified channel(s) will be rendered. If ``all``, all the channels will be rendered in subplots. If ``None`` and the image is RGB, it will be rendered in RGB mode. If ``None`` and the image is not RGB, it is equivalent to ``all``. interpolation : See Below, optional The interpolation used to render the image. For example, if ``bilinear``, the image will be smooth and if ``nearest``, the image will be pixelated. Example options :: {none, nearest, bilinear, bicubic, spline16, spline36, hanning, hamming, hermite, kaiser, quadric, catrom, gaussian, bessel, mitchell, sinc, lanczos} cmap_name: `str`, optional, If ``None``, single channel and three channel images default to greyscale and rgb colormaps respectively. alpha : `float`, optional The alpha blending value, between 0 (transparent) and 1 (opaque). 
render_axes : `bool`, optional If ``True``, the axes will be rendered. axes_font_name : See Below, optional The font of the axes. Example options :: {serif, sans-serif, cursive, fantasy, monospace} axes_font_size : `int`, optional The font size of the axes. axes_font_style : {``normal``, ``italic``, ``oblique``}, optional The font style of the axes. axes_font_weight : See Below, optional The font weight of the axes. Example options :: {ultralight, light, normal, regular, book, medium, roman, semibold, demibold, demi, bold, heavy, extra bold, black} axes_x_limits : `float` or (`float`, `float`) or ``None``, optional The limits of the x axis. If `float`, then it sets padding on the right and left of the Image as a percentage of the Image's width. If `tuple` or `list`, then it defines the axis limits. If ``None``, then the limits are set automatically. axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional The limits of the y axis. If `float`, then it sets padding on the top and bottom of the Image as a percentage of the Image's height. If `tuple` or `list`, then it defines the axis limits. If ``None``, then the limits are set automatically. axes_x_ticks : `list` or `tuple` or ``None``, optional The ticks of the x axis. axes_y_ticks : `list` or `tuple` or ``None``, optional The ticks of the y axis. figure_size : (`float`, `float`) `tuple` or ``None``, optional The size of the figure in inches. Returns ------- viewer : `ImageViewer` The image viewing object. 
""" return ImageViewer( figure_id, new_figure, self.n_dims, self.pixels, channels=channels ).render( interpolation=interpolation, cmap_name=cmap_name, alpha=alpha, render_axes=render_axes, axes_font_name=axes_font_name, axes_font_size=axes_font_size, axes_font_style=axes_font_style, axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks, axes_y_ticks=axes_y_ticks, figure_size=figure_size, ) def _view_landmarks_2d( self, channels=None, group=None, with_labels=None, without_labels=None, figure_id=None, new_figure=False, interpolation="bilinear", cmap_name=None, alpha=1.0, render_lines=True, line_colour=None, line_style="-", line_width=1, render_markers=True, marker_style="o", marker_size=5, marker_face_colour=None, marker_edge_colour=None, marker_edge_width=1.0, render_numbering=False, numbers_horizontal_align="center", numbers_vertical_align="bottom", numbers_font_name="sans-serif", numbers_font_size=10, numbers_font_style="normal", numbers_font_weight="normal", numbers_font_colour="k", render_legend=False, legend_title="", legend_font_name="sans-serif", legend_font_style="normal", legend_font_size=10, legend_font_weight="normal", legend_marker_scale=None, legend_location=2, legend_bbox_to_anchor=(1.05, 1.0), legend_border_axes_pad=None, legend_n_columns=1, legend_horizontal_spacing=None, legend_vertical_spacing=None, legend_border=True, legend_border_padding=None, legend_shadow=False, legend_rounded_corners=False, render_axes=False, axes_font_name="sans-serif", axes_font_size=10, axes_font_style="normal", axes_font_weight="normal", axes_x_limits=None, axes_y_limits=None, axes_x_ticks=None, axes_y_ticks=None, figure_size=(7, 7), ): """ Visualize the landmarks. This method will appear on the Image as ``view_landmarks`` if the Image is 2D. Parameters ---------- channels : `int` or `list` of `int` or ``all`` or ``None`` If `int` or `list` of `int`, the specified channel(s) will be rendered. 
If ``all``, all the channels will be rendered in subplots. If ``None`` and the image is RGB, it will be rendered in RGB mode. If ``None`` and the image is not RGB, it is equivalent to ``all``. group : `str` or``None`` optional The landmark group to be visualized. If ``None`` and there are more than one landmark groups, an error is raised. with_labels : ``None`` or `str` or `list` of `str`, optional If not ``None``, only show the given label(s). Should **not** be used with the ``without_labels`` kwarg. without_labels : ``None`` or `str` or `list` of `str`, optional If not ``None``, show all except the given label(s). Should **not** be used with the ``with_labels`` kwarg. figure_id : `object`, optional The id of the figure to be used. new_figure : `bool`, optional If ``True``, a new figure is created. interpolation : See Below, optional The interpolation used to render the image. For example, if ``bilinear``, the image will be smooth and if ``nearest``, the image will be pixelated. Example options :: {none, nearest, bilinear, bicubic, spline16, spline36, hanning, hamming, hermite, kaiser, quadric, catrom, gaussian, bessel, mitchell, sinc, lanczos} cmap_name: `str`, optional, If ``None``, single channel and three channel images default to greyscale and rgb colormaps respectively. alpha : `float`, optional The alpha blending value, between 0 (transparent) and 1 (opaque). render_lines : `bool`, optional If ``True``, the edges will be rendered. line_colour : See Below, optional The colour of the lines. Example options:: {r, g, b, c, m, k, w} or (3, ) ndarray line_style : ``{-, --, -., :}``, optional The style of the lines. line_width : `float`, optional The width of the lines. render_markers : `bool`, optional If ``True``, the markers will be rendered. marker_style : See Below, optional The style of the markers. Example options :: {., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8} marker_size : `int`, optional The size of the markers in points. 
marker_face_colour : See Below, optional The face (filling) colour of the markers. Example options :: {r, g, b, c, m, k, w} or (3, ) ndarray marker_edge_colour : See Below, optional The edge colour of the markers. Example options :: {r, g, b, c, m, k, w} or (3, ) ndarray marker_edge_width : `float`, optional The width of the markers' edge. render_numbering : `bool`, optional If ``True``, the landmarks will be numbered. numbers_horizontal_align : ``{center, right, left}``, optional The horizontal alignment of the numbers' texts. numbers_vertical_align : ``{center, top, bottom, baseline}``, optional The vertical alignment of the numbers' texts. numbers_font_name : See Below, optional The font of the numbers. Example options :: {serif, sans-serif, cursive, fantasy, monospace} numbers_font_size : `int`, optional The font size of the numbers. numbers_font_style : ``{normal, italic, oblique}``, optional The font style of the numbers. numbers_font_weight : See Below, optional The font weight of the numbers. Example options :: {ultralight, light, normal, regular, book, medium, roman, semibold, demibold, demi, bold, heavy, extra bold, black} numbers_font_colour : See Below, optional The font colour of the numbers. Example options :: {r, g, b, c, m, k, w} or (3, ) ndarray render_legend : `bool`, optional If ``True``, the legend will be rendered. legend_title : `str`, optional The title of the legend. legend_font_name : See below, optional The font of the legend. Example options :: {serif, sans-serif, cursive, fantasy, monospace} legend_font_style : ``{normal, italic, oblique}``, optional The font style of the legend. legend_font_size : `int`, optional The font size of the legend. legend_font_weight : See Below, optional The font weight of the legend. 
Example options :: {ultralight, light, normal, regular, book, medium, roman, semibold, demibold, demi, bold, heavy, extra bold, black} legend_marker_scale : `float`, optional The relative size of the legend markers with respect to the original legend_location : `int`, optional The location of the legend. The predefined values are: =============== == 'best' 0 'upper right' 1 'upper left' 2 'lower left' 3 'lower right' 4 'right' 5 'center left' 6 'center right' 7 'lower center' 8 'upper center' 9 'center' 10 =============== == legend_bbox_to_anchor : (`float`, `float`) `tuple`, optional The bbox that the legend will be anchored. legend_border_axes_pad : `float`, optional The pad between the axes and legend border. legend_n_columns : `int`, optional The number of the legend's columns. legend_horizontal_spacing : `float`, optional The spacing between the columns. legend_vertical_spacing : `float`, optional The vertical space between the legend entries. legend_border : `bool`, optional If ``True``, a frame will be drawn around the legend. legend_border_padding : `float`, optional The fractional whitespace inside the legend border. legend_shadow : `bool`, optional If ``True``, a shadow will be drawn behind legend. legend_rounded_corners : `bool`, optional If ``True``, the frame's corners will be rounded (fancybox). render_axes : `bool`, optional If ``True``, the axes will be rendered. axes_font_name : See Below, optional The font of the axes. Example options :: {serif, sans-serif, cursive, fantasy, monospace} axes_font_size : `int`, optional The font size of the axes. axes_font_style : ``{normal, italic, oblique}``, optional The font style of the axes. axes_font_weight : See Below, optional The font weight of the axes. Example options :: {ultralight, light, normal, regular, book, medium, roman, semibold,demibold, demi, bold, heavy, extra bold, black} axes_x_limits : `float` or (`float`, `float`) or ``None``, optional The limits of the x axis. 
If `float`, then it sets padding on the right and left of the Image as a percentage of the Image's width. If `tuple` or `list`, then it defines the axis limits. If ``None``, then the limits are set automatically. axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional The limits of the y axis. If `float`, then it sets padding on the top and bottom of the Image as a percentage of the Image's height. If `tuple` or `list`, then it defines the axis limits. If ``None``, then the limits are set automatically. axes_x_ticks : `list` or `tuple` or ``None``, optional The ticks of the x axis. axes_y_ticks : `list` or `tuple` or ``None``, optional The ticks of the y axis. figure_size : (`float`, `float`) `tuple` or ``None`` optional The size of the figure in inches. Raises ------ ValueError If both ``with_labels`` and ``without_labels`` are passed. ValueError If the landmark manager doesn't contain the provided group label. """ from menpo.visualize import view_image_landmarks return view_image_landmarks( self, channels, False, group, with_labels, without_labels, figure_id, new_figure, interpolation, cmap_name, alpha, render_lines, line_colour, line_style, line_width, render_markers, marker_style, marker_size, marker_face_colour, marker_edge_colour, marker_edge_width, render_numbering, numbers_horizontal_align, numbers_vertical_align, numbers_font_name, numbers_font_size, numbers_font_style, numbers_font_weight, numbers_font_colour, render_legend, legend_title, legend_font_name, legend_font_style, legend_font_size, legend_font_weight, legend_marker_scale, legend_location, legend_bbox_to_anchor, legend_border_axes_pad, legend_n_columns, legend_horizontal_spacing, legend_vertical_spacing, legend_border, legend_border_padding, legend_shadow, legend_rounded_corners, render_axes, axes_font_name, axes_font_size, axes_font_style, axes_font_weight, axes_x_limits, axes_y_limits, axes_x_ticks, axes_y_ticks, figure_size, ) def crop( self, min_indices, max_indices, 
constrain_to_boundary=False, return_transform=False, ): r""" Return a cropped copy of this image using the given minimum and maximum indices. Landmarks are correctly adjusted so they maintain their position relative to the newly cropped image. Parameters ---------- min_indices : ``(n_dims,)`` `ndarray` The minimum index over each dimension. max_indices : ``(n_dims,)`` `ndarray` The maximum index over each dimension. constrain_to_boundary : `bool`, optional If ``True`` the crop will be snapped to not go beyond this images boundary. If ``False``, an :map:`ImageBoundaryError` will be raised if an attempt is made to go beyond the edge of the image. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the cropping is also returned. Returns ------- cropped_image : `type(self)` A new instance of self, but cropped. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Raises ------ ValueError ``min_indices`` and ``max_indices`` both have to be of length ``n_dims``. All ``max_indices`` must be greater than ``min_indices``. ImageBoundaryError Raised if ``constrain_to_boundary=False``, and an attempt is made to crop the image in a way that violates the image bounds. 
""" min_indices = np.floor(min_indices) max_indices = np.ceil(max_indices) if not (min_indices.size == max_indices.size == self.n_dims): raise ValueError( "Both min and max indices should be 1D numpy arrays of" " length n_dims ({})".format(self.n_dims) ) elif not np.all(max_indices > min_indices): raise ValueError("All max indices must be greater that the min " "indices") min_bounded = self.constrain_points_to_bounds(min_indices) max_bounded = self.constrain_points_to_bounds(max_indices) all_max_bounded = np.all(min_bounded == min_indices) all_min_bounded = np.all(max_bounded == max_indices) if not (constrain_to_boundary or all_max_bounded or all_min_bounded): # points have been constrained and the user didn't want this - raise ImageBoundaryError(min_indices, max_indices, min_bounded, max_bounded) new_shape = (max_bounded - min_bounded).astype(int) return self.warp_to_shape( new_shape, Translation(min_bounded), order=0, warp_landmarks=True, return_transform=return_transform, ) def crop_to_pointcloud( self, pointcloud, boundary=0, constrain_to_boundary=True, return_transform=False ): r""" Return a copy of this image cropped so that it is bounded around a pointcloud with an optional ``n_pixel`` boundary. Parameters ---------- pointcloud : :map:`PointCloud` The pointcloud to crop around. boundary : `int`, optional An extra padding to be added all around the landmarks bounds. constrain_to_boundary : `bool`, optional If ``True`` the crop will be snapped to not go beyond this images boundary. If ``False``, an :map`ImageBoundaryError` will be raised if an attempt is made to go beyond the edge of the image. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the cropping is also returned. Returns ------- image : :map:`Image` A copy of this image cropped to the bounds of the pointcloud. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. 
        Raises
        ------
        ImageBoundaryError
            Raised if ``constrain_to_boundary=False``, and an attempt is made
            to crop the image in a way that violates the image bounds.
        """
        min_indices, max_indices = pointcloud.bounds(boundary=boundary)
        return self.crop(
            min_indices,
            max_indices,
            constrain_to_boundary=constrain_to_boundary,
            return_transform=return_transform,
        )

    def crop_to_landmarks(
        self, group=None, boundary=0, constrain_to_boundary=True, return_transform=False
    ):
        r"""
        Return a copy of this image cropped so that it is bounded around a
        set of landmarks with an optional ``n_pixel`` boundary

        Parameters
        ----------
        group : `str`, optional
            The key of the landmark set that should be used. If ``None`` and
            if there is only one set of landmarks, this set will be used.
        boundary : `int`, optional
            An extra padding to be added all around the landmarks bounds.
        constrain_to_boundary : `bool`, optional
            If ``True`` the crop will be snapped to not go beyond this images
            boundary. If ``False``, an :map:`ImageBoundaryError` will be
            raised if an attempt is made to go beyond the edge of the image.
        return_transform : `bool`, optional
            If ``True``, then the :map:`Transform` object that was used to
            perform the cropping is also returned.

        Returns
        -------
        image : :map:`Image`
            A copy of this image cropped to its landmarks.
        transform : :map:`Transform`
            The transform that was used. It only applies if
            `return_transform` is ``True``.

        Raises
        ------
        ImageBoundaryError
            Raised if ``constrain_to_boundary=False``, and an attempt is made
            to crop the image in a way that violates the image bounds.
""" pc = self.landmarks[group] return self.crop_to_pointcloud( pc, boundary=boundary, constrain_to_boundary=constrain_to_boundary, return_transform=return_transform, ) def crop_to_pointcloud_proportion( self, pointcloud, boundary_proportion, minimum=True, constrain_to_boundary=True, return_transform=False, ): r""" Return a copy of this image cropped so that it is bounded around a pointcloud with a border proportional to the pointcloud spread or range. Parameters ---------- pointcloud : :map:`PointCloud` The pointcloud to crop around. boundary_proportion : `float` Additional padding to be added all around the landmarks bounds defined as a proportion of the landmarks range. See the minimum parameter for a definition of how the range is calculated. minimum : `bool`, optional If ``True`` the specified proportion is relative to the minimum value of the pointclouds' per-dimension range; if ``False`` w.r.t. the maximum value of the pointclouds' per-dimension range. constrain_to_boundary : `bool`, optional If ``True``, the crop will be snapped to not go beyond this images boundary. If ``False``, an :map:`ImageBoundaryError` will be raised if an attempt is made to go beyond the edge of the image. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the cropping is also returned. Returns ------- image : :map:`Image` A copy of this image cropped to the border proportional to the pointcloud spread or range. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Raises ------ ImageBoundaryError Raised if ``constrain_to_boundary=False``, and an attempt is made to crop the image in a way that violates the image bounds. 
""" if minimum: boundary = boundary_proportion * np.min(pointcloud.range()) else: boundary = boundary_proportion * np.max(pointcloud.range()) return self.crop_to_pointcloud( pointcloud, boundary=boundary, constrain_to_boundary=constrain_to_boundary, return_transform=return_transform, ) def crop_to_landmarks_proportion( self, boundary_proportion, group=None, minimum=True, constrain_to_boundary=True, return_transform=False, ): r""" Crop this image to be bounded around a set of landmarks with a border proportional to the landmark spread or range. Parameters ---------- boundary_proportion : `float` Additional padding to be added all around the landmarks bounds defined as a proportion of the landmarks range. See the minimum parameter for a definition of how the range is calculated. group : `str`, optional The key of the landmark set that should be used. If ``None`` and if there is only one set of landmarks, this set will be used. minimum : `bool`, optional If ``True`` the specified proportion is relative to the minimum value of the landmarks' per-dimension range; if ``False`` w.r.t. the maximum value of the landmarks' per-dimension range. constrain_to_boundary : `bool`, optional If ``True``, the crop will be snapped to not go beyond this images boundary. If ``False``, an :map:`ImageBoundaryError` will be raised if an attempt is made to go beyond the edge of the image. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the cropping is also returned. Returns ------- image : :map:`Image` This image, cropped to its landmarks with a border proportional to the landmark spread or range. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Raises ------ ImageBoundaryError Raised if ``constrain_to_boundary=False``, and an attempt is made to crop the image in a way that violates the image bounds. 
""" pc = self.landmarks[group] return self.crop_to_pointcloud_proportion( pc, boundary_proportion, minimum=minimum, constrain_to_boundary=constrain_to_boundary, return_transform=return_transform, ) def constrain_points_to_bounds(self, points): r""" Constrains the points provided to be within the bounds of this image. Parameters ---------- points : ``(d,)`` `ndarray` Points to be snapped to the image boundaries. Returns ------- bounded_points : ``(d,)`` `ndarray` Points snapped to not stray outside the image edges. """ bounded_points = points.copy() # check we don't stray under any edges bounded_points[bounded_points < 0] = 0 # check we don't stray over any edges shape = np.array(self.shape) over_image = (shape - bounded_points) < 0 bounded_points[over_image] = shape[over_image] return bounded_points def extract_patches( self, patch_centers, patch_shape=(16, 16), sample_offsets=None, as_single_array=True, order=0, mode="constant", cval=0.0, ): r""" Extract a set of patches from an image. Given a set of patch centers and a patch size, patches are extracted from within the image, centred on the given coordinates. Sample offsets denote a set of offsets to extract from within a patch. This is very useful if you want to extract a dense set of features around a set of landmarks and simply sample the same grid of patches around the landmarks. If sample offsets are used, to access the offsets for each patch you need to slice the resulting `list`. So for 2 offsets, the first centers offset patches would be ``patches[:2]``. Currently only 2D images are supported. Note that the default is nearest neighbour sampling for the patches which is achieved via slicing and is much more efficient than using sampling/interpolation. Note that a significant performance decrease will be measured if the ``order`` or ``mode`` parameters are modified from ``order = 0`` and ``mode = 'constant'`` as internally sampling will be used rather than slicing. 
        Parameters
        ----------
        patch_centers : :map:`PointCloud`
            The centers to extract patches around.
        patch_shape : ``(1, n_dims)`` `tuple` or `ndarray`, optional
            The size of the patch to extract
        sample_offsets : ``(n_offsets, n_dims)`` `ndarray` or ``None``, optional
            The offsets to sample from within a patch. So ``(0, 0)`` is the
            centre of the patch (no offset) and ``(1, 0)`` would be sampling
            the patch from 1 pixel up the first axis away from the centre.
            If ``None``, then no offsets are applied.
        as_single_array : `bool`, optional
            If ``True``, an ``(n_center, n_offset, n_channels, patch_shape)``
            `ndarray`, thus a single numpy array is returned containing each
            patch. If ``False``, a `list` of ``n_center * n_offset``
            :map:`Image` objects is returned representing each patch.
        order : `int`, optional
            The order of interpolation. The order has to be in the range
            [0,5]. See warp_to_shape for more information.
        mode : ``{constant, nearest, reflect, wrap}``, optional
            Points outside the boundaries of the input are filled according
            to the given mode.
        cval : `float`, optional
            Used in conjunction with mode ``constant``, the value outside the
            image boundaries.

        Returns
        -------
        patches : `list` or `ndarray`
            Returns the extracted patches. Returns an `ndarray` if
            ``as_single_array=True`` and a `list` if
            ``as_single_array=False``.

        Raises
        ------
        ValueError
            If image is not 2D
        """
        if self.n_dims != 2:
            raise ValueError(
                "Only two dimensional patch extraction is " "currently supported."
            )

        if order == 0 and mode == "constant":
            # Fast path using slicing
            single_array = extract_patches_with_slice(
                self.pixels,
                patch_centers.points,
                patch_shape,
                offsets=sample_offsets,
                cval=cval,
            )
        else:
            single_array = extract_patches_by_sampling(
                self.pixels,
                patch_centers.points,
                patch_shape,
                offsets=sample_offsets,
                order=order,
                mode=mode,
                cval=cval,
            )

        if as_single_array:
            return single_array
        else:
            return [Image(o, copy=False) for p in single_array for o in p]

    def extract_patches_around_landmarks(
        self, group=None, patch_shape=(16, 16), sample_offsets=None, as_single_array=True
    ):
        r"""
        Extract patches around landmarks existing on this image. Provided
        the group label and optionally the landmark label extract a set of
        patches.

        See `extract_patches` for more information.

        Currently only 2D images are supported.

        Parameters
        ----------
        group : `str` or ``None``, optional
            The landmark group to use as patch centres.
        patch_shape : `tuple` or `ndarray`, optional
            The size of the patch to extract
        sample_offsets : ``(n_offsets, n_dims)`` `ndarray` or ``None``, optional
            The offsets to sample from within a patch. So ``(0, 0)`` is the
            centre of the patch (no offset) and ``(1, 0)`` would be sampling
            the patch from 1 pixel up the first axis away from the centre.
            If ``None``, then no offsets are applied.
        as_single_array : `bool`, optional
            If ``True``, an ``(n_center, n_offset, n_channels, patch_shape)``
            `ndarray`, thus a single numpy array is returned containing each
            patch. If ``False``, a `list` of ``n_center * n_offset``
            :map:`Image` objects is returned representing each patch.

        Returns
        -------
        patches : `list` or `ndarray`
            Returns the extracted patches. Returns an `ndarray` if
            ``as_single_array=True`` and a `list` if
            ``as_single_array=False``.
Raises ------ ValueError If image is not 2D """ return self.extract_patches( self.landmarks[group], patch_shape=patch_shape, sample_offsets=sample_offsets, as_single_array=as_single_array, ) def set_patches(self, patches, patch_centers, offset=None, offset_index=None): r""" Set the values of a group of patches into the correct regions of a copy of this image. Given an array of patches and a set of patch centers, the patches' values are copied in the regions of the image that are centred on the coordinates of the given centers. The patches argument can have any of the two formats that are returned from the `extract_patches()` and `extract_patches_around_landmarks()` methods. Specifically it can be: 1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` 2. `list` of ``n_center * n_offset`` :map:`Image` objects Currently only 2D images are supported. Parameters ---------- patches : `ndarray` or `list` The values of the patches. It can have any of the two formats that are returned from the `extract_patches()` and `extract_patches_around_landmarks()` methods. Specifically, it can either be an ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` or a `list` of ``n_center * n_offset`` :map:`Image` objects. patch_centers : :map:`PointCloud` The centers to set the patches around. offset : `list` or `tuple` or ``(1, 2)`` `ndarray` or ``None``, optional The offset to apply on the patch centers within the image. If ``None``, then ``(0, 0)`` is used. offset_index : `int` or ``None``, optional The offset index within the provided `patches` argument, thus the index of the second dimension from which to sample. If ``None``, then ``0`` is used. Raises ------ ValueError If image is not 2D ValueError If offset does not have shape (1, 2) """ # parse arguments if self.n_dims != 2: raise ValueError( "Only two dimensional patch insertion is " "currently supported." 
) if offset is None: offset = np.zeros([1, 2], dtype=np.intp) elif isinstance(offset, tuple) or isinstance(offset, list): offset = np.asarray([offset]) offset = np.require(offset, dtype=np.intp) if not offset.shape == (1, 2): raise ValueError( "The offset must be a tuple, a list or a " "numpy.array with shape (1, 2)." ) if offset_index is None: offset_index = 0 # if patches is a list, convert it to array if isinstance(patches, list): patches = _convert_patches_list_to_single_array( patches, patch_centers.n_points ) copy = self.copy() # set patches set_patches(patches, copy.pixels, patch_centers.points, offset, offset_index) return copy def set_patches_around_landmarks( self, patches, group=None, offset=None, offset_index=None ): r""" Set the values of a group of patches around the landmarks existing in a copy of this image. Given an array of patches, a group and a label, the patches' values are copied in the regions of the image that are centred on the coordinates of corresponding landmarks. The patches argument can have any of the two formats that are returned from the `extract_patches()` and `extract_patches_around_landmarks()` methods. Specifically it can be: 1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` 2. `list` of ``n_center * n_offset`` :map:`Image` objects Currently only 2D images are supported. Parameters ---------- patches : `ndarray` or `list` The values of the patches. It can have any of the two formats that are returned from the `extract_patches()` and `extract_patches_around_landmarks()` methods. Specifically, it can either be an ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` or a `list` of ``n_center * n_offset`` :map:`Image` objects. group : `str` or ``None`` optional The landmark group to use as patch centres. offset : `list` or `tuple` or ``(1, 2)`` `ndarray` or ``None``, optional The offset to apply on the patch centers within the image. If ``None``, then ``(0, 0)`` is used. 
offset_index : `int` or ``None``, optional The offset index within the provided `patches` argument, thus the index of the second dimension from which to sample. If ``None``, then ``0`` is used. Raises ------ ValueError If image is not 2D ValueError If offset does not have shape (1, 2) """ return self.set_patches( patches, self.landmarks[group], offset=offset, offset_index=offset_index ) def warp_to_mask( self, template_mask, transform, warp_landmarks=True, order=1, mode="constant", cval=0.0, batch_size=None, return_transform=False, ): r""" Return a copy of this image warped into a different reference space. Note that warping into a mask is slower than warping into a full image. If you don't need a non-linear mask, consider :meth:``warp_to_shape`` instead. Parameters ---------- template_mask : :map:`BooleanImage` Defines the shape of the result, and what pixels should be sampled. transform : :map:`Transform` Transform **from the template space back to this image**. Defines, for each pixel location on the template, which pixel location should be sampled from on this image. warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as ``self``, but with each landmark updated to the warped position. order : `int`, optional The order of interpolation. The order has to be in the range [0,5] ========= ===================== Order Interpolation ========= ===================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ===================== mode : ``{constant, nearest, reflect, wrap}``, optional Points outside the boundaries of the input are filled according to the given mode. cval : `float`, optional Used in conjunction with mode ``constant``, the value outside the image boundaries. batch_size : `int` or ``None``, optional This should only be considered for large images. 
Setting this value can cause warping to become much slower, particular for cached warps such as Piecewise Affine. This size indicates how many points in the image should be warped at a time, which keeps memory usage low. If ``None``, no batching is used and all points are warped at once. return_transform : `bool`, optional This argument is for internal use only. If ``True``, then the :map:`Transform` object is also returned. Returns ------- warped_image : :map:`MaskedImage` A copy of this image, warped. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. """ if self.n_dims != transform.n_dims: raise ValueError( "Trying to warp a {}D image with a {}D transform " "(they must match)".format(self.n_dims, transform.n_dims) ) template_points = template_mask.true_indices() points_to_sample = transform.apply(template_points, batch_size=batch_size) sampled = self.sample(points_to_sample, order=order, mode=mode, cval=cval) # set any nan values to 0 sampled[np.isnan(sampled)] = 0 # build a warped version of the image warped_image = self._build_warp_to_mask(template_mask, sampled) if warp_landmarks and self.has_landmarks: warped_image.landmarks = self.landmarks transform.pseudoinverse()._apply_inplace(warped_image.landmarks) if hasattr(self, "path"): warped_image.path = self.path # optionally return the transform if return_transform: return warped_image, transform else: return warped_image def _build_warp_to_mask(self, template_mask, sampled_pixel_values): r""" Builds the warped image from the template mask and sampled pixel values. Overridden for :map:`BooleanImage` as we can't use the usual :meth:`from_vector_inplace` method. All other :map:`Image` classes share the :map:`Image` implementation. Parameters ---------- template_mask : :map:`BooleanImage` or 2D `bool ndarray` Mask for warping. sampled_pixel_values : ``(n_true_pixels_in_mask,)`` `ndarray` Sampled value to rebuild the masked image from. 
""" from menpo.image import MaskedImage warped_image = MaskedImage.init_blank( template_mask.shape, n_channels=self.n_channels, mask=template_mask ) warped_image._from_vector_inplace(sampled_pixel_values.ravel()) return warped_image def sample(self, points_to_sample, order=1, mode="constant", cval=0.0): r""" Sample this image at the given sub-pixel accurate points. The input PointCloud should have the same number of dimensions as the image e.g. a 2D PointCloud for a 2D multi-channel image. A numpy array will be returned the has the values for every given point across each channel of the image. Parameters ---------- points_to_sample : :map:`PointCloud` Array of points to sample from the image. Should be `(n_points, n_dims)` order : `int`, optional The order of interpolation. The order has to be in the range [0,5]. See warp_to_shape for more information. mode : ``{constant, nearest, reflect, wrap}``, optional Points outside the boundaries of the input are filled according to the given mode. cval : `float`, optional Used in conjunction with mode ``constant``, the value outside the image boundaries. Returns ------- sampled_pixels : (`n_points`, `n_channels`) `ndarray` The interpolated values taken across every channel of the image. """ # The public interface is a PointCloud, but when this is used internally # a numpy array is passed. So let's just treat the PointCloud as a # 'special case' and not document the ndarray ability. if isinstance(points_to_sample, PointCloud): points_to_sample = points_to_sample.points return scipy_interpolation( self.pixels, points_to_sample, order=order, mode=mode, cval=cval ) def warp_to_shape( self, template_shape, transform, warp_landmarks=True, order=1, mode="constant", cval=0.0, batch_size=None, return_transform=False, ): """ Return a copy of this image warped into a different reference space. 
Parameters ---------- template_shape : `tuple` or `ndarray` Defines the shape of the result, and what pixel indices should be sampled (all of them). transform : :map:`Transform` Transform **from the template_shape space back to this image**. Defines, for each index on template_shape, which pixel location should be sampled from on this image. warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. order : `int`, optional The order of interpolation. The order has to be in the range [0,5] ========= ==================== Order Interpolation ========= ==================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ==================== mode : ``{constant, nearest, reflect, wrap}``, optional Points outside the boundaries of the input are filled according to the given mode. cval : `float`, optional Used in conjunction with mode ``constant``, the value outside the image boundaries. batch_size : `int` or ``None``, optional This should only be considered for large images. Setting this value can cause warping to become much slower, particular for cached warps such as Piecewise Affine. This size indicates how many points in the image should be warped at a time, which keeps memory usage low. If ``None``, no batching is used and all points are warped at once. return_transform : `bool`, optional This argument is for internal use only. If ``True``, then the :map:`Transform` object is also returned. Returns ------- warped_image : `type(self)` A copy of this image, warped. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. 
""" template_shape = np.array(template_shape, dtype=int) if ( isinstance(transform, Homogeneous) and order in range(2) and self.n_dims == 2 and cv2_perspective_interpolation is not None ): # we couldn't do the crop, but OpenCV has an optimised # interpolation for 2D perspective warps - let's use that warped_pixels = cv2_perspective_interpolation( self.pixels, template_shape, transform, order=order, mode=mode, cval=cval, ) else: template_points = indices_for_image_of_shape(template_shape) points_to_sample = transform.apply(template_points, batch_size=batch_size) sampled = self.sample(points_to_sample, order=order, mode=mode, cval=cval) # set any nan values to 0 # (seems that map_coordinates can produce nan values) sampled[np.isnan(sampled)] = 0 # build a warped version of the image warped_pixels = sampled.reshape((self.n_channels,) + tuple(template_shape)) return self._build_warp_to_shape( warped_pixels, transform, warp_landmarks, return_transform ) def _build_warp_to_shape( self, warped_pixels, transform, warp_landmarks, return_transform ): # factored out common logic from the different paths we can take in # warp_to_shape. Rebuilds an image post-warp, adjusting landmarks # as necessary. warped_image = Image(warped_pixels, copy=False) # warp landmarks if requested. if warp_landmarks and self.has_landmarks: warped_image.landmarks = self.landmarks transform.pseudoinverse()._apply_inplace(warped_image.landmarks) if hasattr(self, "path"): warped_image.path = self.path # optionally return the transform if return_transform: return warped_image, transform else: return warped_image def rescale( self, scale, round="ceil", order=1, warp_landmarks=True, return_transform=False ): r""" Return a copy of this image, rescaled by a given factor. Landmarks are rescaled appropriately. Parameters ---------- scale : `float` or `tuple` of `floats` The scale factor. If a tuple, the scale to apply to each dimension. 
If a single `float`, the scale will be applied uniformly across each dimension. round: ``{ceil, floor, round}``, optional Rounding function to be applied to floating point shapes. order : `int`, optional The order of interpolation. The order has to be in the range [0,5] ========= ==================== Order Interpolation ========= ==================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ==================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the rescale is also returned. Returns ------- rescaled_image : ``type(self)`` A copy of this image, rescaled. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Raises ------ ValueError: If less scales than dimensions are provided. If any scale is less than or equal to 0. """ # Pythonic way of converting to list if we are passed a single float try: if len(scale) < self.n_dims: raise ValueError( "Must provide a scale per dimension." "{} scales were provided, {} were expected.".format( len(scale), self.n_dims ) ) except TypeError: # Thrown when len() is called on a float scale = [scale] * self.n_dims # Make sure we have a numpy array scale = np.asarray(scale) for s in scale: if s <= 0: raise ValueError("Scales must be positive floats.") transform = NonUniformScale(scale) # use the scale factor to make the template mask bigger # while respecting the users rounding preference. template_shape = round_image_shape(transform.apply(self.shape), round) # due to image indexing, we can't just apply the pseudoinverse # transform to achieve the scaling we want though! # Consider a 3x rescale on a 2x4 image. 
Looking at each dimension: # H 2 -> 6 so [0-1] -> [0-5] = 5/1 = 5x # W 4 -> 12 [0-3] -> [0-11] = 11/3 = 3.67x # => need to make the correct scale per dimension! shape = np.array(self.shape, dtype=float) # scale factors = max_index_after / current_max_index # (note that max_index = length - 1, as 0 based) scale_factors = (scale * shape - 1) / (shape - 1) inverse_transform = NonUniformScale(scale_factors).pseudoinverse() # for rescaling we enforce that mode is nearest to avoid num. errors return self.warp_to_shape( template_shape, inverse_transform, warp_landmarks=warp_landmarks, order=order, mode="nearest", return_transform=return_transform, ) def rescale_to_diagonal( self, diagonal, round="ceil", warp_landmarks=True, return_transform=False ): r""" Return a copy of this image, rescaled so that the it's diagonal is a new size. Parameters ---------- diagonal: `int` The diagonal size of the new image. round: ``{ceil, floor, round}``, optional Rounding function to be applied to floating point shapes. warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the rescale is also returned. Returns ------- rescaled_image : type(self) A copy of this image, rescaled. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. """ return self.rescale( diagonal / self.diagonal(), round=round, warp_landmarks=warp_landmarks, return_transform=return_transform, ) def rescale_to_pointcloud( self, pointcloud, group=None, round="ceil", order=1, warp_landmarks=True, return_transform=False, ): r""" Return a copy of this image, rescaled so that the scale of a particular group of landmarks matches the scale of the passed reference pointcloud. 
Parameters ---------- pointcloud: :map:`PointCloud` The reference pointcloud to which the landmarks specified by ``group`` will be scaled to match. group : `str`, optional The key of the landmark set that should be used. If ``None``, and if there is only one set of landmarks, this set will be used. round: ``{ceil, floor, round}``, optional Rounding function to be applied to floating point shapes. order : `int`, optional The order of interpolation. The order has to be in the range [0,5] ========= ==================== Order Interpolation ========= ==================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ==================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the rescale is also returned. Returns ------- rescaled_image : ``type(self)`` A copy of this image, rescaled. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. """ pc = self.landmarks[group] scale = AlignmentUniformScale(pc, pointcloud).as_vector().copy() return self.rescale( scale, round=round, order=order, warp_landmarks=warp_landmarks, return_transform=return_transform, ) def rescale_landmarks_to_diagonal_range( self, diagonal_range, group=None, round="ceil", order=1, warp_landmarks=True, return_transform=False, ): r""" Return a copy of this image, rescaled so that the ``diagonal_range`` of the bounding box containing its landmarks matches the specified ``diagonal_range`` range. Parameters ---------- diagonal_range: ``(n_dims,)`` `ndarray` The diagonal_range range that we want the landmarks of the returned image to have. group : `str`, optional The key of the landmark set that should be used. 
If ``None`` and if there is only one set of landmarks, this set will be used. round : ``{ceil, floor, round}``, optional Rounding function to be applied to floating point shapes. order : `int`, optional The order of interpolation. The order has to be in the range [0,5] ========= ===================== Order Interpolation ========= ===================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ===================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the rescale is also returned. Returns ------- rescaled_image : ``type(self)`` A copy of this image, rescaled. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. """ x, y = self.landmarks[group].range() scale = diagonal_range / np.sqrt(x ** 2 + y ** 2) return self.rescale( scale, round=round, order=order, warp_landmarks=warp_landmarks, return_transform=return_transform, ) def resize(self, shape, order=1, warp_landmarks=True, return_transform=False): r""" Return a copy of this image, resized to a particular shape. All image information (landmarks, and mask in the case of :map:`MaskedImage`) is resized appropriately. Parameters ---------- shape : `tuple` The new shape to resize to. order : `int`, optional The order of interpolation. The order has to be in the range [0,5] ========= ===================== Order Interpolation ========= ===================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ===================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. 
return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the resize is also returned. Returns ------- resized_image : ``type(self)`` A copy of this image, resized. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Raises ------ ValueError: If the number of dimensions of the new shape does not match the number of dimensions of the image. """ shape = np.asarray(shape, dtype=float) if len(shape) != self.n_dims: raise ValueError( "Dimensions must match." "{} dimensions provided, {} were expected.".format( shape.shape, self.n_dims ) ) scales = shape / self.shape # Have to round the shape when scaling to deal with floating point # errors. For example, if we want (250, 250), we need to ensure that # we get (250, 250) even if the number we obtain is 250 to some # floating point inaccuracy. return self.rescale( scales, round="round", order=order, warp_landmarks=warp_landmarks, return_transform=return_transform, ) def zoom(self, scale, order=1, warp_landmarks=True, return_transform=False): r""" Return a copy of this image, zoomed about the centre point. ``scale`` values greater than 1.0 denote zooming **in** to the image and values less than 1.0 denote zooming **out** of the image. The size of the image will not change, if you wish to scale an image, please see :meth:`rescale`. Parameters ---------- scale : `float` ``scale > 1.0`` denotes zooming in. Thus the image will appear larger and areas at the edge of the zoom will be 'cropped' out. ``scale < 1.0`` denotes zooming out. The image will be padded by the value of ``cval``. order : `int`, optional The order of interpolation. 
The order has to be in the range [0,5] ========= ===================== Order Interpolation ========= ===================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ===================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the zooming is also returned. Returns ------- zoomed_image : ``type(self)`` A copy of this image, zoomed. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. """ t = scale_about_centre(self, 1.0 / scale) return self.warp_to_shape( self.shape, t, order=order, mode="nearest", warp_landmarks=warp_landmarks, return_transform=return_transform, ) def rotate_ccw_about_centre( self, theta, degrees=True, retain_shape=False, mode="constant", cval=0.0, round="round", order=1, warp_landmarks=True, return_transform=False, ): r""" Return a copy of this image, rotated counter-clockwise about its centre. Note that the `retain_shape` argument defines the shape of the rotated image. If ``retain_shape=True``, then the shape of the rotated image will be the same as the one of current image, so some regions will probably be cropped. If ``retain_shape=False``, then the returned image has the correct size so that the whole area of the current image is included. Parameters ---------- theta : `float` The angle of rotation about the centre. degrees : `bool`, optional If ``True``, `theta` is interpreted in degrees. If ``False``, ``theta`` is interpreted as radians. retain_shape : `bool`, optional If ``True``, then the shape of the rotated image will be the same as the one of current image, so some regions will probably be cropped. 
If ``False``, then the returned image has the correct size so that the whole area of the current image is included. mode : ``{constant, nearest, reflect, wrap}``, optional Points outside the boundaries of the input are filled according to the given mode. cval : `float`, optional The value to be set outside the rotated image boundaries. round : ``{'ceil', 'floor', 'round'}``, optional Rounding function to be applied to floating point shapes. This is only used in case ``retain_shape=True``. order : `int`, optional The order of interpolation. The order has to be in the range ``[0,5]``. This is only used in case ``retain_shape=True``. ========= ==================== Order Interpolation ========= ==================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ==================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as ``self``, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the rotation is also returned. Returns ------- rotated_image : ``type(self)`` The rotated image. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Raises ------ ValueError Image rotation is presently only supported on 2D images """ if self.n_dims != 2: raise ValueError( "Image rotation is presently only supported on " "2D images" ) rotation = Rotation.init_from_2d_ccw_angle(theta, degrees=degrees) return self.transform_about_centre( rotation, retain_shape=retain_shape, mode=mode, cval=cval, round=round, order=order, warp_landmarks=warp_landmarks, return_transform=return_transform, ) def transform_about_centre( self, transform, retain_shape=False, mode="constant", cval=0.0, round="round", order=1, warp_landmarks=True, return_transform=False, ): r""" Return a copy of this image, transformed about its centre. 
Note that the `retain_shape` argument defines the shape of the transformed image. If ``retain_shape=True``, then the shape of the transformed image will be the same as the one of current image, so some regions will probably be cropped. If ``retain_shape=False``, then the returned image has the correct size so that the whole area of the current image is included. .. note:: This method will not work for transforms that result in a transform chain as :map:`TransformChain` is not invertible. .. note:: Be careful when defining transforms for warping imgaes. All pixel locations must fall within a valid range as expected by the transform. Therefore, your transformation must accept 'negative' pixel locations as the pixel locations provided to your transform will have the object centre subtracted from them. Parameters ---------- transform : :map:`ComposableTransform` and :map:`VInvertible` type A composable transform. ``pseudoinverse`` will be invoked on the resulting transform so it must implement a valid inverse. retain_shape : `bool`, optional If ``True``, then the shape of the sheared image will be the same as the one of current image, so some regions will probably be cropped. If ``False``, then the returned image has the correct size so that the whole area of the current image is included. mode : ``{constant, nearest, reflect, wrap}``, optional Points outside the boundaries of the input are filled according to the given mode. cval : `float`, optional The value to be set outside the sheared image boundaries. round : ``{'ceil', 'floor', 'round'}``, optional Rounding function to be applied to floating point shapes. This is only used in case ``retain_shape=True``. order : `int`, optional The order of interpolation. The order has to be in the range ``[0,5]``. This is only used in case ``retain_shape=True``. 
========= ==================== Order Interpolation ========= ==================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ==================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as ``self``, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the shearing is also returned. Returns ------- transformed_image : ``type(self)`` The transformed image. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Examples -------- This is an example for rotating an image about its center. Let's first load an image, create the rotation transform and then apply it :: import matplotlib.pyplot as plt import menpo.io as mio from menpo.transform import Rotation # Load image im = mio.import_builtin_asset.lenna_png() # Create shearing transform rot_tr = Rotation.init_from_2d_ccw_angle(45) # Render original image plt.subplot(131) im.view_landmarks() plt.title('Original') # Render rotated image plt.subplot(132) im.transform_about_centre(rot_tr).view_landmarks() plt.title('Rotated') # Render rotated image that has shape equal as original image plt.subplot(133) im.transform_about_centre(rot_tr, retain_shape=True).view_landmarks() plt.title('Rotated (Retain original shape)') Similarly, in order to apply a shear transform :: import matplotlib.pyplot as plt import menpo.io as mio from menpo.transform import Affine # Load image im = mio.import_builtin_asset.lenna_png() # Create shearing transform shear_tr = Affine.init_from_2d_shear(25, 10) # Render original image plt.subplot(131) im.view_landmarks() plt.title('Original') # Render sheared image plt.subplot(132) im.transform_about_centre(shear_tr).view_landmarks() plt.title('Sheared') # Render sheared image that has shape equal as original image plt.subplot(133) 
im.transform_about_centre(shear_tr, retain_shape=True).view_landmarks() plt.title('Sheared (Retain original shape)') """ if retain_shape: shape = self.shape applied_transform = transform_about_centre(self, transform) else: # Get image's bounding box coordinates original_bbox = bounding_box((0, 0), np.array(self.shape) - 1) # Translate to origin and apply transform trans = Translation(-self.centre(), skip_checks=True).compose_before( transform ) transformed_bbox = trans.apply(original_bbox) # Create new translation so that min bbox values go to 0 t = Translation(-transformed_bbox.bounds()[0]) applied_transform = trans.compose_before(t) transformed_bbox = trans.apply(original_bbox) # Output image's shape is the range of the sheared bounding box # while respecting the users rounding preference. shape = round_image_shape(transformed_bbox.range() + 1, round) # Warp image return self.warp_to_shape( shape, applied_transform.pseudoinverse(), order=order, warp_landmarks=warp_landmarks, mode=mode, cval=cval, return_transform=return_transform, ) def mirror(self, axis=1, order=1, warp_landmarks=True, return_transform=False): r""" Return a copy of this image, mirrored/flipped about a certain axis. Parameters ---------- axis : `int`, optional The axis about which to mirror the image. order : `int`, optional The order of interpolation. The order has to be in the range ``[0,5]``. ========= ==================== Order Interpolation ========= ==================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ==================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the mirroring is also returned. Returns ------- mirrored_image : ``type(self)`` The mirrored image. 
transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Raises ------ ValueError axis cannot be negative ValueError axis={} but the image has {} dimensions """ # Check axis argument if axis < 0: raise ValueError("axis cannot be negative") elif axis >= self.n_dims: raise ValueError( "axis={} but the image has {} " "dimensions".format(axis, self.n_dims) ) # Create transform that includes ... # ... flipping about the selected axis ... rot_matrix = np.eye(self.n_dims) rot_matrix[axis, axis] = -1 # ... and translating back to the image's bbox tr_matrix = np.zeros(self.n_dims) tr_matrix[axis] = self.shape[axis] - 1 # Create transform object trans = Rotation(rot_matrix, skip_checks=True).compose_before( Translation(tr_matrix, skip_checks=True) ) # Warp image return self.warp_to_shape( self.shape, trans.pseudoinverse(), mode="nearest", order=order, warp_landmarks=warp_landmarks, return_transform=return_transform, ) def pyramid(self, n_levels=3, downscale=2): r""" Return a rescaled pyramid of this image. The first image of the pyramid will be a copy of the original, unmodified, image, and counts as level 1. Parameters ---------- n_levels : `int`, optional Total number of levels in the pyramid, including the original unmodified image downscale : `float`, optional Downscale factor. Yields ------ image_pyramid: `generator` Generator yielding pyramid layers as :map:`Image` objects. """ image = self.copy() yield image for _ in range(n_levels - 1): image = image.rescale(1.0 / downscale) yield image def gaussian_pyramid(self, n_levels=3, downscale=2, sigma=None): r""" Return the gaussian pyramid of this image. The first image of the pyramid will be a copy of the original, unmodified, image, and counts as level 1. Parameters ---------- n_levels : `int`, optional Total number of levels in the pyramid, including the original unmodified image downscale : `float`, optional Downscale factor. 
sigma : `float`, optional Sigma for gaussian filter. Default is ``downscale / 3.`` which corresponds to a filter mask twice the size of the scale factor that covers more than 99% of the gaussian distribution. Yields ------ image_pyramid: `generator` Generator yielding pyramid layers as :map:`Image` objects. """ from menpo.feature import gaussian_filter if sigma is None: sigma = downscale / 3.0 image = self.copy() yield image for level in range(n_levels - 1): image = gaussian_filter(image, sigma).rescale(1.0 / downscale) yield image def as_greyscale(self, mode="luminosity", channel=None): r""" Returns a greyscale version of the image. If the image does *not* represent a 2D RGB image, then the ``luminosity`` mode will fail. Parameters ---------- mode : ``{average, luminosity, channel}``, optional ============== ===================================================== mode Greyscale Algorithm ============== ===================================================== average Equal average of all channels luminosity Calculates the luminance using the CCIR 601 formula: | .. math:: Y' = 0.2989 R' + 0.5870 G' + 0.1140 B' channel A specific channel is chosen as the intensity value. ============== ===================================================== channel: `int`, optional The channel to be taken. Only used if mode is ``channel``. Returns ------- greyscale_image : :map:`MaskedImage` A copy of this image in greyscale. """ greyscale = self.copy() if mode == "luminosity": if self.n_dims != 2: raise ValueError( "The 'luminosity' mode only works on 2D RGB" "images. {} dimensions found, " "2 expected.".format(self.n_dims) ) elif self.n_channels != 3: raise ValueError( "The 'luminosity' mode only works on RGB" "images. {} channels found, " "3 expected.".format(self.n_channels) ) # Only compute the coefficients once. 
global _greyscale_luminosity_coef if _greyscale_luminosity_coef is None: _greyscale_luminosity_coef = np.linalg.inv( np.array( [ [1.0, 0.956, 0.621], [1.0, -0.272, -0.647], [1.0, -1.106, 1.703], ] ) )[0, :] # Compute greyscale via dot product pixels = np.dot(_greyscale_luminosity_coef, greyscale.pixels.reshape(3, -1)) # Reshape image back to original shape (with 1 channel) pixels = pixels.reshape(greyscale.shape) elif mode == "average": pixels = np.mean(greyscale.pixels, axis=0) elif mode == "channel": if channel is None: raise ValueError( "For the 'channel' mode you have to provide" " a channel index" ) pixels = greyscale.pixels[channel] else: raise ValueError( "Unknown mode {} - expected 'luminosity', " "'average' or 'channel'.".format(mode) ) # Set new pixels - ensure channel axis and maintain greyscale.pixels = pixels[None, ...].astype(greyscale.pixels.dtype, copy=False) return greyscale def as_PILImage(self, out_dtype=np.uint8): r""" Return a PIL copy of the image scaled and cast to the correct values for the provided ``out_dtype``. Image must only have 1 or 3 channels and be 2 dimensional. Non `uint8` floating point images must be in the range ``[0, 1]`` to be converted. Parameters ---------- out_dtype : `np.dtype`, optional The dtype the output array should be. Returns ------- pil_image : `PILImage` PIL copy of image Raises ------ ValueError If image is not 2D and has 1 channel or 3 channels. ValueError If pixels data type is `float32` or `float64` and the pixel range is outside of ``[0, 1]`` ValueError If the output dtype is unsupported. Currently uint8 is supported. """ if self.n_dims != 2 or (self.n_channels != 1 and self.n_channels != 3): raise ValueError( "Can only convert greyscale or RGB 2D images. 
" "Received a {} channel {}D image.".format(self.n_channels, self.n_dims) ) # Slice off the channel for greyscale images if self.n_channels == 1: pixels = self.pixels[0] else: pixels = channels_to_back(self.pixels) pixels = denormalize_pixels_range(pixels, out_dtype) return PILImage.fromarray(pixels) def as_imageio(self, out_dtype=np.uint8): r""" Return an Imageio copy of the image scaled and cast to the correct values for the provided ``out_dtype``. Image must only have 1 or 3 channels and be 2 dimensional. Non `uint8` floating point images must be in the range ``[0, 1]`` to be converted. Parameters ---------- out_dtype : `np.dtype`, optional The dtype the output array should be. Returns ------- imageio_image : `ndarray` Imageio image (which is just a numpy ndarray with the channels as the last axis). Raises ------ ValueError If image is not 2D and has 1 channel or 3 channels. ValueError If pixels data type is `float32` or `float64` and the pixel range is outside of ``[0, 1]`` ValueError If the output dtype is unsupported. Currently uint8 and uint16 are supported. """ warn( "This method is no longer supported and will be removed in a " "future version of Menpo. " "Use .pixels_with_channels_at_back instead.", MenpoDeprecationWarning, ) if self.n_dims != 2 or (self.n_channels != 1 and self.n_channels != 3): raise ValueError( "Can only convert greyscale or RGB 2D images. " "Received a {} channel {}D image.".format(self.n_channels, self.n_dims) ) # Slice off the channel for greyscale images if self.n_channels == 1: pixels = self.pixels[0] else: pixels = channels_to_back(self.pixels) return denormalize_pixels_range(pixels, out_dtype) def pixels_range(self): r""" The range of the pixel values (min and max pixel values). Returns ------- min_max : ``(dtype, dtype)`` The minimum and maximum value of the pixels array. 
""" return self.pixels.min(), self.pixels.max() def rolled_channels(self): r""" Deprecated - please use the equivalent ``pixels_with_channels_at_back`` method. """ warn( "This method is no longer supported and will be removed in a " "future version of Menpo. " "Use .pixels_with_channels_at_back() instead.", MenpoDeprecationWarning, ) return self.pixels_with_channels_at_back() def pixels_with_channels_at_back(self, out_dtype=None): r""" Returns the pixels matrix, with the channels rolled to the back axis. This may be required for interacting with external code bases that require images to have channels as the last axis, rather than the Menpo convention of channels as the first axis. If this image is single channel, the final axis is dropped. Parameters ---------- out_dtype : `np.dtype`, optional The dtype the output array should be. Returns ------- rolled_channels : `ndarray` Pixels with channels as the back (last) axis. If single channel, the last axis will be dropped. """ p = channels_to_back(self.pixels) if out_dtype is not None: p = denormalize_pixels_range(p, out_dtype=out_dtype) return np.squeeze(p) def __str__(self): return "{} {}D Image with {} channel{}".format( self._str_shape(), self.n_dims, self.n_channels, "s" * (self.n_channels > 1) ) def has_landmarks_outside_bounds(self): """ Indicates whether there are landmarks located outside the image bounds. :type: `bool` """ if self.has_landmarks: for l_group in self.landmarks: pc = self.landmarks[l_group].points if np.any(np.logical_or(self.shape - pc < 1, pc < 0)): return True return False def constrain_landmarks_to_bounds(self): r""" Deprecated - please use the equivalent ``constrain_to_bounds`` method now on PointCloud, in conjunction with the new Image ``bounds()`` method. 
For example: >>> im.constrain_landmarks_to_bounds() # Equivalent to below >>> im.landmarks['test'] = im.landmarks['test'].constrain_to_bounds(im.bounds()) """ warn( "This method is no longer supported and will be removed in a " "future version of Menpo. " "Use .constrain_to_bounds() instead (on PointCloud).", MenpoDeprecationWarning, ) for l_group in self.landmarks: l = self.landmarks[l_group] for k in range(l.points.shape[1]): tmp = l.points[:, k] tmp[tmp < 0] = 0 tmp[tmp > self.shape[k] - 1] = self.shape[k] - 1 l.points[:, k] = tmp self.landmarks[l_group] = l def normalize_std(self, mode="all", **kwargs): r""" Returns a copy of this image normalized such that its pixel values have zero mean and unit variance. Parameters ---------- mode : ``{all, per_channel}``, optional If ``all``, the normalization is over all channels. If ``per_channel``, each channel individually is mean centred and normalized in variance. Returns ------- image : ``type(self)`` A copy of this image, normalized. """ warn( "This method is no longer supported and will be removed in a " "future version of Menpo. " "Use .normalize_std() instead (features package).", MenpoDeprecationWarning, ) return self._normalize(np.std, mode=mode) def normalize_norm(self, mode="all", **kwargs): r""" Returns a copy of this image normalized such that its pixel values have zero mean and its norm equals 1. Parameters ---------- mode : ``{all, per_channel}``, optional If ``all``, the normalization is over all channels. If ``per_channel``, each channel individually is mean centred and unit norm. Returns ------- image : ``type(self)`` A copy of this image, normalized. """ warn( "This method is no longer supported and will be removed in a " "future version of Menpo. 
" "Use .normalize_norm() instead (features package).", MenpoDeprecationWarning, ) def scale_func(pixels, axis=None): return np.linalg.norm(pixels, axis=axis, **kwargs) return self._normalize(scale_func, mode=mode) def _normalize(self, scale_func, mode="all"): from menpo.feature import normalize return normalize(self, scale_func=scale_func, mode=mode) def rescale_pixels(self, minimum, maximum, per_channel=True): r"""A copy of this image with pixels linearly rescaled to fit a range. Note that the only pixels that will be considered and rescaled are those that feature in the vectorized form of this image. If you want to use this routine on all the pixels in a :map:`MaskedImage`, consider using `as_unmasked()` prior to this call. Parameters ---------- minimum: `float` The minimal value of the rescaled pixels maximum: `float` The maximal value of the rescaled pixels per_channel: `boolean`, optional If ``True``, each channel will be rescaled independently. If ``False``, the scaling will be over all channels. Returns ------- rescaled_image: ``type(self)`` A copy of this image with pixels linearly rescaled to fit in the range provided. """ v = self.as_vector(keep_channels=True).T if per_channel: min_, max_ = v.min(axis=0), v.max(axis=0) else: min_, max_ = v.min(), v.max() sf = ((maximum - minimum) * 1.0) / (max_ - min_) v_new = ((v - min_) * sf) + minimum return self.from_vector(v_new.T.ravel()) def clip_pixels(self, minimum=None, maximum=None): r"""A copy of this image with pixels linearly clipped to fit a range. Parameters ---------- minimum: `float`, optional The minimal value of the clipped pixels. If None is provided, the default value will be 0. maximum: `float`, optional The maximal value of the clipped pixels. If None is provided, the default value will depend on the dtype. Returns ------- rescaled_image: ``type(self)`` A copy of this image with pixels linearly rescaled to fit in the range provided. 
""" if minimum is None: minimum = 0 if maximum is None: dtype = self.pixels.dtype if dtype == np.uint8: maximum = 255 elif dtype == np.uint16: maximum = 65535 elif dtype in [np.float32, np.float64]: maximum = 1.0 else: m1 = "Could not recognise the dtype ({}) to set the maximum." raise ValueError(m1.format(dtype)) copy = self.copy() copy.pixels = copy.pixels.clip(min=minimum, max=maximum) return copy def rasterize_landmarks( self, group=None, render_lines=True, line_style="-", line_colour="b", line_width=1, render_markers=True, marker_style="o", marker_size=1, marker_face_colour="b", marker_edge_colour="b", marker_edge_width=1, backend="matplotlib", ): r""" This method provides the ability to rasterize 2D landmarks onto the image. The returned image has the specified landmark groups rasterized onto the image - which is useful for things like creating result examples or rendering videos with annotations. Since multiple landmark groups can be specified, all arguments can take lists of parameters that map to the provided groups list. Therefore, the parameters must be lists of the correct length or a single parameter to apply to every landmark group. Multiple backends are provided, all with different strengths. The 'pillow' backend is very fast, but not very flexible. The `matplotlib` backend should be feature compatible with other Menpo rendering methods, but is much slower due to the overhead of creating a figure to render into. Parameters ---------- group : `str` or `list` of `str`, optional The landmark group key, or a list of keys. render_lines : `bool`, optional If ``True``, and the provided landmark group is a :map:`PointDirectedGraph`, the edges are rendered. line_style : `str`, optional The style of the edge line. Not all backends support this argument. line_colour : `str` or `tuple`, optional A Matplotlib style colour or a backend dependant colour. line_width : `int`, optional The width of the line to rasterize. 
        render_markers : `bool`, optional
            If ``True``, render markers at the coordinates of each landmark.
        marker_style : `str`, optional
            A Matplotlib marker style. Not all backends support all marker
            styles.
        marker_size : `int`, optional
            The size of the marker - different backends use different scale
            spaces so consistent output may by difficult.
        marker_face_colour : `str`, optional
            A Matplotlib style colour or a backend dependant colour.
        marker_edge_colour : `str`, optional
            A Matplotlib style colour or a backend dependant colour.
        marker_edge_width : `int`, optional
            The width of the marker edge. Not all backends support this.
        backend : {'matplotlib', 'pillow'}, optional
            The backend to use.

        Returns
        -------
        rasterized_image : :map:`Image`
            The image with the landmarks rasterized directly into the pixels.

        Raises
        ------
        ValueError
            Only 2D images are supported.
        ValueError
            Only RGB (3-channel) or Greyscale (1-channel) images are supported.
        """
        from .rasterize import rasterize_landmarks_2d

        return rasterize_landmarks_2d(
            self,
            group=group,
            render_lines=render_lines,
            line_style=line_style,
            line_colour=line_colour,
            line_width=line_width,
            render_markers=render_markers,
            marker_style=marker_style,
            marker_size=marker_size,
            marker_face_colour=marker_face_colour,
            marker_edge_colour=marker_edge_colour,
            marker_edge_width=marker_edge_width,
            backend=backend,
        )


def round_image_shape(shape, round):
    r"""
    Round a floating point shape to an integer shape using the given
    numpy rounding function.

    Parameters
    ----------
    shape : `ndarray` or `tuple` of `float`
        The (possibly floating point) shape to round.
    round : ``{'ceil', 'round', 'floor'}``
        The name of the numpy rounding function to apply element-wise.

    Returns
    -------
    rounded_shape : `tuple` of `int`
        The rounded shape.

    Raises
    ------
    ValueError
        If ``round`` is not one of ``'ceil'``, ``'round'`` or ``'floor'``.
    """
    if round not in ["ceil", "round", "floor"]:
        raise ValueError("round must be either ceil, round or floor")
    # Apply the selected np rounding function element-wise, then return a
    # plain tuple of ints (callers use the result directly as an image shape).
    return tuple(getattr(np, round)(shape).astype(int))


def _convert_patches_list_to_single_array(patches_list, n_center):
    r"""
    Converts patches from a `list` of :map:`Image` objects to a single `ndarray`
    with shape ``(n_center, n_offset, self.n_channels, patch_shape)``.

    Note that these two are the formats returned by the `extract_patches()`
    and `extract_patches_around_landmarks()` methods of :map:`Image` class.
Parameters ---------- patches_list : `list` of `n_center * n_offset` :map:`Image` objects A `list` that contains all the patches as :map:`Image` objects. n_center : `int` The number of centers from which the patches are extracted. Returns ------- patches_array : `ndarray` ``(n_center, n_offset, n_channels, patch_shape)`` The numpy array that contains all the patches. """ n_offsets = int(len(patches_list) / n_center) n_channels = patches_list[0].n_channels height = patches_list[0].height width = patches_list[0].width patches_array = np.empty( (n_center, n_offsets, n_channels, height, width), dtype=patches_list[0].pixels.dtype, ) total_index = 0 for p in range(n_center): for o in range(n_offsets): patches_array[p, o, ...] = patches_list[total_index].pixels total_index += 1 return patches_array def _create_patches_image( patches, patch_centers, patches_indices=None, offset_index=None, background="black" ): r""" Creates an :map:`Image` object in which the patches are located on the correct regions based on the centers. Thus, the image is a block-sparse matrix. It has also attached a `patch_Centers` :map:`PointCloud` object with the centers that correspond to the patches that the user selected to set. The patches argument can have any of the two formats that are returned from the `extract_patches()` and `extract_patches_around_landmarks()` methods of the :map:`Image` class. Specifically it can be: 1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` 2. `list` of ``n_center * n_offset`` :map:`Image` objects Parameters ---------- patches : `ndarray` or `list` The values of the patches. It can have any of the two formats that are returned from the `extract_patches()` and `extract_patches_around_landmarks()` methods. Specifically, it can either be an ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` or a `list` of ``n_center * n_offset`` :map:`Image` objects. patch_centers : :map:`PointCloud` The centers to set the patches around. 
patches_indices : `int` or `list` of `int` or ``None``, optional Defines the patches that will be set (copied) to the image. If ``None``, then all the patches are copied. offset_index : `int` or ``None``, optional The offset index within the provided `patches` argument, thus the index of the second dimension from which to sample. If ``None``, then ``0`` is used. background : ``{'black', 'white'}``, optional If ``'black'``, then the background is set equal to the minimum value of `patches`. If ``'white'``, then the background is set equal to the maximum value of `patches`. Returns ------- patches_image : :map:`Image` The output patches image object. Raises ------ ValueError Background must be either ''black'' or ''white''. """ # If patches is a list, convert it to array if isinstance(patches, list): patches = _convert_patches_list_to_single_array(patches, patch_centers.n_points) # Parse inputs if offset_index is None: offset_index = 0 if patches_indices is None: patches_indices = np.arange(patches.shape[0]) elif not isinstance(patches_indices, Iterable): patches_indices = [patches_indices] # Compute patches image's shape n_channels = patches.shape[2] patch_shape0 = patches.shape[3] patch_shape1 = patches.shape[4] top, left = np.min(patch_centers.points, 0) bottom, right = np.max(patch_centers.points, 0) min_0 = np.floor(top - patch_shape0) min_1 = np.floor(left - patch_shape1) max_0 = np.ceil(bottom + patch_shape0) max_1 = np.ceil(right + patch_shape1) height = max_0 - min_0 + 1 width = max_1 - min_1 + 1 # Translate the patch centers to fit in the new image new_patch_centers = patch_centers.copy() new_patch_centers.points = patch_centers.points - np.array([[min_0, min_1]]) # Create new image with the correct background values if background == "black": patches_image = Image.init_blank( (height, width), n_channels, fill=np.min(patches[patches_indices]), dtype=patches.dtype, ) elif background == "white": patches_image = Image.init_blank( (height, width), n_channels, 
fill=np.max(patches[patches_indices]), dtype=patches.dtype, ) else: raise ValueError("Background must be either " "black" " or " "white" ".") # If there was no slicing on the patches, then attach the original patch # centers. Otherwise, attach the sliced ones. if set(patches_indices) == set(range(patches.shape[0])): patches_image.landmarks["patch_centers"] = new_patch_centers else: tmp_centers = PointCloud(new_patch_centers.points[patches_indices]) patches_image.landmarks["patch_centers"] = tmp_centers # Set the patches return patches_image.set_patches_around_landmarks( patches[patches_indices], group="patch_centers", offset_index=offset_index )
bsd-3-clause
cdrttn/samba-regedit
lib/dnspython/dns/rdtypes/ANY/NSEC3PARAM.py
85
3169
# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

# NOTE: Python 2-only module -- it relies on cStringIO, the 'hex-codec'
# string codec and the builtin cmp(), none of which exist on Python 3.
import cStringIO
import struct

import dns.exception
import dns.rdata


class NSEC3PARAM(dns.rdata.Rdata):
    """NSEC3PARAM record

    @ivar algorithm: the hash algorithm number
    @type algorithm: int
    @ivar flags: the flags
    @type flags: int
    @ivar iterations: the number of iterations
    @type iterations: int
    @ivar salt: the salt
    @type salt: string"""

    __slots__ = ['algorithm', 'flags', 'iterations', 'salt']

    def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt):
        super(NSEC3PARAM, self).__init__(rdclass, rdtype)
        self.algorithm = algorithm
        self.flags = flags
        self.iterations = iterations
        self.salt = salt

    def to_text(self, origin=None, relativize=True, **kw):
        # Presentation format: "<algorithm> <flags> <iterations> <salt>".
        # An empty salt is rendered as '-'; otherwise it is hex-encoded.
        if self.salt == '':
            salt = '-'
        else:
            salt = self.salt.encode('hex-codec')
        return '%u %u %u %s' % (self.algorithm, self.flags, self.iterations,
                                salt)

    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        # Parse the four presentation-format fields in order; '-' means an
        # empty salt, anything else is hex-decoded to raw bytes.
        algorithm = tok.get_uint8()
        flags = tok.get_uint8()
        iterations = tok.get_uint16()
        salt = tok.get_string()
        if salt == '-':
            salt = ''
        else:
            salt = salt.decode('hex-codec')
        return cls(rdclass, rdtype, algorithm, flags, iterations, salt)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress=None, origin=None):
        # Wire format: algorithm (1 byte), flags (1 byte), iterations
        # (2 bytes, network order), salt length (1 byte), salt.
        # NOTE(review): no check that len(self.salt) fits in one byte
        # (<= 255); struct.pack would raise on overflow -- confirm upstream
        # validation.
        l = len(self.salt)
        file.write(struct.pack("!BBHB", self.algorithm, self.flags,
                               self.iterations, l))
        file.write(self.salt)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        # Unpack the fixed 5-byte header, then read the variable-length salt.
        (algorithm, flags, iterations, slen) = \
            struct.unpack('!BBHB', wire[current : current + 5])
        current += 5
        rdlen -= 5
        # .unwrap() converts the bounds-checked WireData slice back to a
        # plain string.
        salt = wire[current : current + slen].unwrap()
        current += slen
        rdlen -= slen
        # Any leftover bytes mean the rdata length field was inconsistent.
        if rdlen != 0:
            raise dns.exception.FormError
        return cls(rdclass, rdtype, algorithm, flags, iterations, salt)

    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Compare by canonical wire form, per the generic rdata ordering
        # convention (Python 2 cmp() semantics: -1/0/1).
        b1 = cStringIO.StringIO()
        self.to_wire(b1)
        b2 = cStringIO.StringIO()
        other.to_wire(b2)
        return cmp(b1.getvalue(), b2.getvalue())
gpl-3.0
dulems/hue
desktop/core/ext-py/Django-1.6.10/django/utils/translation/trans_real.py
74
25651
"""Translation helper functions.""" from __future__ import unicode_literals import locale import os import re import sys import gettext as gettext_module from threading import local import warnings from django.utils.importlib import import_module from django.utils.datastructures import SortedDict from django.utils.encoding import force_str, force_text from django.utils.functional import memoize from django.utils._os import upath from django.utils.safestring import mark_safe, SafeData from django.utils import six from django.utils.six import StringIO from django.utils.translation import TranslatorCommentWarning # Translations are cached in a dictionary for every language+app tuple. # The active translations are stored by threadid to make them thread local. _translations = {} _active = local() # The default translation is based on the settings file. _default = None # This is a cache for normalized accept-header languages to prevent multiple # file lookups when checking the same locale on repeated requests. _accepted = {} _checked_languages = {} # magic gettext number to separate context from message CONTEXT_SEPARATOR = "\x04" # Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9 # and RFC 3066, section 2.1 accept_language_re = re.compile(r''' ([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*" (?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:.0{,3})?))? # Optional "q=1.00", "q=0.8" (?:\s*,\s*|$) # Multiple accepts per header. ''', re.VERBOSE) language_code_prefix_re = re.compile(r'^/([\w-]+)(/|$)') def to_locale(language, to_lower=False): """ Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is True, the last component is lower-cased (en_us). 
""" p = language.find('-') if p >= 0: if to_lower: return language[:p].lower()+'_'+language[p+1:].lower() else: # Get correct locale for sr-latn if len(language[p+1:]) > 2: return language[:p].lower()+'_'+language[p+1].upper()+language[p+2:].lower() return language[:p].lower()+'_'+language[p+1:].upper() else: return language.lower() def to_language(locale): """Turns a locale name (en_US) into a language name (en-us).""" p = locale.find('_') if p >= 0: return locale[:p].lower()+'-'+locale[p+1:].lower() else: return locale.lower() class DjangoTranslation(gettext_module.GNUTranslations): """ This class sets up the GNUTranslations context with regard to output charset. """ def __init__(self, *args, **kw): gettext_module.GNUTranslations.__init__(self, *args, **kw) self.set_output_charset('utf-8') self.__language = '??' def merge(self, other): self._catalog.update(other._catalog) def set_language(self, language): self.__language = language self.__to_language = to_language(language) def language(self): return self.__language def to_language(self): return self.__to_language def __repr__(self): return "<DjangoTranslation lang:%s>" % self.__language def translation(language): """ Returns a translation object. This translation object will be constructed out of multiple GNUTranslations objects by merging their catalogs. It will construct a object for the requested language and add a fallback to the default language, if it's different from the requested language. 
""" global _translations t = _translations.get(language, None) if t is not None: return t from django.conf import settings globalpath = os.path.join(os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale') def _fetch(lang, fallback=None): global _translations res = _translations.get(lang, None) if res is not None: return res loc = to_locale(lang) def _translation(path): try: t = gettext_module.translation('django', path, [loc], DjangoTranslation) t.set_language(lang) return t except IOError: return None res = _translation(globalpath) # We want to ensure that, for example, "en-gb" and "en-us" don't share # the same translation object (thus, merging en-us with a local update # doesn't affect en-gb), even though they will both use the core "en" # translation. So we have to subvert Python's internal gettext caching. base_lang = lambda x: x.split('-', 1)[0] if base_lang(lang) in [base_lang(trans) for trans in list(_translations)]: res._info = res._info.copy() res._catalog = res._catalog.copy() def _merge(path): t = _translation(path) if t is not None: if res is None: return t else: res.merge(t) return res for appname in reversed(settings.INSTALLED_APPS): app = import_module(appname) apppath = os.path.join(os.path.dirname(upath(app.__file__)), 'locale') if os.path.isdir(apppath): res = _merge(apppath) for localepath in reversed(settings.LOCALE_PATHS): if os.path.isdir(localepath): res = _merge(localepath) if res is None: if fallback is not None: res = fallback else: return gettext_module.NullTranslations() _translations[lang] = res return res default_translation = _fetch(settings.LANGUAGE_CODE) current_translation = _fetch(language, fallback=default_translation) return current_translation def activate(language): """ Fetches the translation object for a given tuple of application name and language and installs it as the current translation object for the current thread. 
""" _active.value = translation(language) def deactivate(): """ Deinstalls the currently active translation object so that further _ calls will resolve against the default translation object, again. """ if hasattr(_active, "value"): del _active.value def deactivate_all(): """ Makes the active translation object a NullTranslations() instance. This is useful when we want delayed translations to appear as the original string for some reason. """ _active.value = gettext_module.NullTranslations() def get_language(): """Returns the currently selected language.""" t = getattr(_active, "value", None) if t is not None: try: return t.to_language() except AttributeError: pass # If we don't have a real translation object, assume it's the default language. from django.conf import settings return settings.LANGUAGE_CODE def get_language_bidi(): """ Returns selected language's BiDi layout. * False = left-to-right layout * True = right-to-left layout """ from django.conf import settings base_lang = get_language().split('-')[0] return base_lang in settings.LANGUAGES_BIDI def catalog(): """ Returns the current active catalog for further processing. This can be used if you need to modify the catalog or want to access the whole message catalog instead of just translating one string. """ global _default t = getattr(_active, "value", None) if t is not None: return t if _default is None: from django.conf import settings _default = translation(settings.LANGUAGE_CODE) return _default def do_translate(message, translation_function): """ Translates 'message' using the given 'translation_function' name -- which will be either gettext or ugettext. It uses the current thread to find the translation object to use. If no current translation is activated, the message will be run through the default translation object. 
""" global _default # str() is allowing a bytestring message to remain bytestring on Python 2 eol_message = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n')) t = getattr(_active, "value", None) if t is not None: result = getattr(t, translation_function)(eol_message) else: if _default is None: from django.conf import settings _default = translation(settings.LANGUAGE_CODE) result = getattr(_default, translation_function)(eol_message) if isinstance(message, SafeData): return mark_safe(result) return result def gettext(message): """ Returns a string of the translation of the message. Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2. """ return do_translate(message, 'gettext') if six.PY3: ugettext = gettext else: def ugettext(message): return do_translate(message, 'ugettext') def pgettext(context, message): msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message) result = ugettext(msg_with_ctxt) if CONTEXT_SEPARATOR in result: # Translation not found # force unicode, because lazy version expects unicode result = force_text(message) return result def gettext_noop(message): """ Marks strings for translation but doesn't translate them now. This can be used to store strings in global variables that should stay in the base language (because they might be used externally) and will be translated later. """ return message def do_ntranslate(singular, plural, number, translation_function): global _default t = getattr(_active, "value", None) if t is not None: return getattr(t, translation_function)(singular, plural, number) if _default is None: from django.conf import settings _default = translation(settings.LANGUAGE_CODE) return getattr(_default, translation_function)(singular, plural, number) def ngettext(singular, plural, number): """ Returns a string of the translation of either the singular or plural, based on the number. Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2. 
""" return do_ntranslate(singular, plural, number, 'ngettext') if six.PY3: ungettext = ngettext else: def ungettext(singular, plural, number): """ Returns a unicode strings of the translation of either the singular or plural, based on the number. """ return do_ntranslate(singular, plural, number, 'ungettext') def npgettext(context, singular, plural, number): msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular), "%s%s%s" % (context, CONTEXT_SEPARATOR, plural), number) result = ungettext(*msgs_with_ctxt) if CONTEXT_SEPARATOR in result: # Translation not found result = ungettext(singular, plural, number) return result def all_locale_paths(): """ Returns a list of paths to user-provides languages files. """ from django.conf import settings globalpath = os.path.join( os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale') return [globalpath] + list(settings.LOCALE_PATHS) def check_for_language(lang_code): """ Checks whether there is a global language file for the given language code. This is used to decide whether a user-provided language is available. This is only used for language codes from either the cookies or session and during format localization. """ for path in all_locale_paths(): if gettext_module.find('django', path, [to_locale(lang_code)]) is not None: return True return False check_for_language = memoize(check_for_language, _checked_languages, 1) def get_supported_language_variant(lang_code, supported=None, strict=False): """ Returns the language-code that's listed in supported languages, possibly selecting a more generic variant. Raises LookupError if nothing found. If `strict` is False (the default), the function will look for an alternative country-specific variant when the currently checked is not found. """ if supported is None: from django.conf import settings supported = SortedDict(settings.LANGUAGES) if lang_code: # if fr-CA is not supported, try fr-ca; if that fails, fallback to fr. 
generic_lang_code = lang_code.split('-')[0] variants = (lang_code, lang_code.lower(), generic_lang_code, generic_lang_code.lower()) for code in variants: if code in supported and check_for_language(code): return code if not strict: # if fr-fr is not supported, try fr-ca. for supported_code in supported: if supported_code.startswith((generic_lang_code + '-', generic_lang_code.lower() + '-')): return supported_code raise LookupError(lang_code) def get_language_from_path(path, supported=None, strict=False): """ Returns the language-code if there is a valid language-code found in the `path`. If `strict` is False (the default), the function will look for an alternative country-specific variant when the currently checked is not found. """ if supported is None: from django.conf import settings supported = SortedDict(settings.LANGUAGES) regex_match = language_code_prefix_re.match(path) if not regex_match: return None lang_code = regex_match.group(1) try: return get_supported_language_variant(lang_code, supported, strict=strict) except LookupError: return None def get_language_from_request(request, check_path=False): """ Analyzes the request to find what language the user wants the system to show. Only languages listed in settings.LANGUAGES are taken into account. If the user requests a sublanguage where we have a main language, we send out the main language. If check_path is True, the URL path prefix will be checked for a language code, otherwise this is skipped for backwards compatibility. 
""" global _accepted from django.conf import settings supported = SortedDict(settings.LANGUAGES) if check_path: lang_code = get_language_from_path(request.path_info, supported) if lang_code is not None: return lang_code if hasattr(request, 'session'): lang_code = request.session.get('django_language', None) if lang_code in supported and lang_code is not None and check_for_language(lang_code): return lang_code lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME) try: return get_supported_language_variant(lang_code, supported) except LookupError: pass accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '') for accept_lang, unused in parse_accept_lang_header(accept): if accept_lang == '*': break # 'normalized' is the root name of the locale in POSIX format (which is # the format used for the directories holding the MO files). normalized = locale.locale_alias.get(to_locale(accept_lang, True)) if not normalized: continue # Remove the default encoding from locale_alias. normalized = normalized.split('.')[0] if normalized in _accepted: # We've seen this locale before and have an MO file for it, so no # need to check again. return _accepted[normalized] try: accept_lang = get_supported_language_variant(accept_lang, supported) except LookupError: continue else: _accepted[normalized] = accept_lang return accept_lang try: return get_supported_language_variant(settings.LANGUAGE_CODE, supported) except LookupError: return settings.LANGUAGE_CODE dot_re = re.compile(r'\S') def blankout(src, char): """ Changes every non-whitespace character to the given char. Used in the templatize function. 
""" return dot_re.sub(char, src) context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""") inline_re = re.compile(r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*""") block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""") endblock_re = re.compile(r"""^\s*endblocktrans$""") plural_re = re.compile(r"""^\s*plural$""") constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""") one_percent_re = re.compile(r"""(?<!%)%(?!%)""") def templatize(src, origin=None): """ Turns a Django template into something that is understood by xgettext. It does so by translating the Django translation tags into standard gettext function invocations. """ from django.conf import settings from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK) src = force_text(src, settings.FILE_CHARSET) out = StringIO() message_context = None intrans = False inplural = False singular = [] plural = [] incomment = False comment = [] lineno_comment_map = {} comment_lineno_cache = None for t in Lexer(src, origin).tokenize(): if incomment: if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment': content = ''.join(comment) translators_comment_start = None for lineno, line in enumerate(content.splitlines(True)): if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK): translators_comment_start = lineno for lineno, line in enumerate(content.splitlines(True)): if translators_comment_start is not None and lineno >= translators_comment_start: out.write(' # %s' % line) else: out.write(' #\n') incomment = False comment = [] else: comment.append(t.contents) elif intrans: if t.token_type == TOKEN_BLOCK: endbmatch = endblock_re.match(t.contents) pluralmatch = plural_re.match(t.contents) if endbmatch: if inplural: if message_context: out.write(' npgettext(%r, %r, %r,count) ' % (message_context, ''.join(singular), ''.join(plural))) else: out.write(' ngettext(%r, 
%r, count) ' % (''.join(singular), ''.join(plural))) for part in singular: out.write(blankout(part, 'S')) for part in plural: out.write(blankout(part, 'P')) else: if message_context: out.write(' pgettext(%r, %r) ' % (message_context, ''.join(singular))) else: out.write(' gettext(%r) ' % ''.join(singular)) for part in singular: out.write(blankout(part, 'S')) message_context = None intrans = False inplural = False singular = [] plural = [] elif pluralmatch: inplural = True else: filemsg = '' if origin: filemsg = 'file %s, ' % origin raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno)) elif t.token_type == TOKEN_VAR: if inplural: plural.append('%%(%s)s' % t.contents) else: singular.append('%%(%s)s' % t.contents) elif t.token_type == TOKEN_TEXT: contents = one_percent_re.sub('%%', t.contents) if inplural: plural.append(contents) else: singular.append(contents) else: # Handle comment tokens (`{# ... #}`) plus other constructs on # the same line: if comment_lineno_cache is not None: cur_lineno = t.lineno + t.contents.count('\n') if comment_lineno_cache == cur_lineno: if t.token_type != TOKEN_COMMENT: for c in lineno_comment_map[comment_lineno_cache]: filemsg = '' if origin: filemsg = 'file %s, ' % origin warn_msg = ("The translator-targeted comment '%s' " "(%sline %d) was ignored, because it wasn't the last item " "on the line.") % (c, filemsg, comment_lineno_cache) warnings.warn(warn_msg, TranslatorCommentWarning) lineno_comment_map[comment_lineno_cache] = [] else: out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache])) comment_lineno_cache = None if t.token_type == TOKEN_BLOCK: imatch = inline_re.match(t.contents) bmatch = block_re.match(t.contents) cmatches = constant_re.findall(t.contents) if imatch: g = imatch.group(1) if g[0] == '"': g = g.strip('"') elif g[0] == "'": g = g.strip("'") g = one_percent_re.sub('%%', g) if imatch.group(2): # A context is provided context_match = 
context_re.match(imatch.group(2)) message_context = context_match.group(1) if message_context[0] == '"': message_context = message_context.strip('"') elif message_context[0] == "'": message_context = message_context.strip("'") out.write(' pgettext(%r, %r) ' % (message_context, g)) message_context = None else: out.write(' gettext(%r) ' % g) elif bmatch: for fmatch in constant_re.findall(t.contents): out.write(' _(%s) ' % fmatch) if bmatch.group(1): # A context is provided context_match = context_re.match(bmatch.group(1)) message_context = context_match.group(1) if message_context[0] == '"': message_context = message_context.strip('"') elif message_context[0] == "'": message_context = message_context.strip("'") intrans = True inplural = False singular = [] plural = [] elif cmatches: for cmatch in cmatches: out.write(' _(%s) ' % cmatch) elif t.contents == 'comment': incomment = True else: out.write(blankout(t.contents, 'B')) elif t.token_type == TOKEN_VAR: parts = t.contents.split('|') cmatch = constant_re.match(parts[0]) if cmatch: out.write(' _(%s) ' % cmatch.group(1)) for p in parts[1:]: if p.find(':_(') >= 0: out.write(' %s ' % p.split(':',1)[1]) else: out.write(blankout(p, 'F')) elif t.token_type == TOKEN_COMMENT: if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK): lineno_comment_map.setdefault(t.lineno, []).append(t.contents) comment_lineno_cache = t.lineno else: out.write(blankout(t.contents, 'X')) return force_str(out.getvalue()) def parse_accept_lang_header(lang_string): """ Parses the lang_string, which is the body of an HTTP Accept-Language header, and returns a list of (lang, q-value), ordered by 'q' values. Any format errors in lang_string results in an empty list being returned. 
""" result = [] pieces = accept_language_re.split(lang_string) if pieces[-1]: return [] for i in range(0, len(pieces) - 1, 3): first, lang, priority = pieces[i : i + 3] if first: return [] if priority: priority = float(priority) if not priority: # if priority is 0.0 at this point make it 1.0 priority = 1.0 result.append((lang, priority)) result.sort(key=lambda k: k[1], reverse=True) return result
apache-2.0
kakunbsc/bb
lib/bb/fetch/cvs.py
5
6216
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' implementations

Classes for obtaining upstream sources for the
BitBake build tools.

"""

# Copyright (C) 2003, 2004  Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#Based on functions from the base bb module, Copyright 2003 Holger Schurig
#

import os, re
import bb
from bb import data
from bb.fetch import Fetch
from bb.fetch import FetchError
from bb.fetch import MissingParameterError


class Cvs(Fetch):
    """
    Class to fetch a module or modules from cvs repositories
    """
    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with cvs.
        """
        return ud.type in ['cvs', 'pserver']

    def localpath(self, url, ud, d):
        # Derive the local tarball name (and its DL_DIR path) from the URL
        # parameters; also populates ud.module/tag/date as a side effect.
        if not "module" in ud.parm:
            raise MissingParameterError("cvs method needs a 'module' parameter")
        ud.module = ud.parm["module"]

        ud.tag = ""
        if 'tag' in ud.parm:
            ud.tag = ud.parm['tag']

        # Override the default date in certain cases
        if 'date' in ud.parm:
            ud.date = ud.parm['date']
        elif ud.tag:
            # A tag pins the revision, so the date is irrelevant.
            ud.date = ""

        norecurse = ''
        if 'norecurse' in ud.parm:
            norecurse = '_norecurse'

        fullpath = ''
        if 'fullpath' in ud.parm:
            fullpath = '_fullpath'

        ud.localfile = data.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath), d)

        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def forcefetch(self, url, ud, d):
        # A floating 'now' date can never be considered up to date.
        if (ud.date == "now"):
            return True
        return False

    def go(self, loc, ud, d):
        # Fetch (checkout or update) the module and pack it into the local
        # tarball computed by localpath().

        # try to use the tarball stash
        if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping cvs checkout." % ud.localpath)
            return

        method = "pserver"
        if "method" in ud.parm:
            method = ud.parm["method"]

        localdir = ud.module
        if "localdir" in ud.parm:
            localdir = ud.parm["localdir"]

        cvs_port = ""
        if "port" in ud.parm:
            cvs_port = ud.parm["port"]

        cvs_rsh = None
        if method == "ext":
            if "rsh" in ud.parm:
                cvs_rsh = ud.parm["rsh"]

        # Assemble the CVSROOT string, optionally routed through a proxy.
        if method == "dir":
            cvsroot = ud.path
        else:
            cvsroot = ":" + method
            cvsproxyhost = data.getVar('CVS_PROXY_HOST', d, True)
            if cvsproxyhost:
                cvsroot += ";proxy=" + cvsproxyhost
            cvsproxyport = data.getVar('CVS_PROXY_PORT', d, True)
            if cvsproxyport:
                cvsroot += ";proxyport=" + cvsproxyport
            cvsroot += ":" + ud.user
            if ud.pswd:
                cvsroot += ":" + ud.pswd
            cvsroot += "@" + ud.host + ":" + cvs_port + ud.path

        options = []
        if 'norecurse' in ud.parm:
            options.append("-l")
        if ud.date:
            # treat YYYYMMDDHHMM specially for CVS
            if len(ud.date) == 12:
                options.append("-D \"%s %s:%s UTC\"" % (ud.date[0:8], ud.date[8:10], ud.date[10:12]))
            else:
                options.append("-D \"%s UTC\"" % ud.date)
        if ud.tag:
            options.append("-r %s" % ud.tag)

        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "cvs:%s" % data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)

        data.setVar('CVSROOT', cvsroot, localdata)
        data.setVar('CVSCOOPTS', " ".join(options), localdata)
        data.setVar('CVSMODULE', ud.module, localdata)
        cvscmd = data.getVar('FETCHCOMMAND', localdata, 1)
        cvsupdatecmd = data.getVar('UPDATECOMMAND', localdata, 1)

        if cvs_rsh:
            cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd)
            cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd)

        # create module directory
        bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory")
        pkg = data.expand('${PN}', d)
        pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg)
        moddir = os.path.join(pkgdir,localdir)
        # NOTE(review): the commands below run through os.system() with
        # values interpolated straight from the URL/metadata (no shell
        # quoting) and the process working directory is changed via
        # os.chdir() -- consider whether hostile metadata is in scope here.
        if os.access(os.path.join(moddir,'CVS'), os.R_OK):
            bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc)
            # update sources there
            os.chdir(moddir)
            myret = os.system(cvsupdatecmd)
        else:
            bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
            # check out sources there
            bb.mkdirhier(pkgdir)
            os.chdir(pkgdir)
            bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % cvscmd)
            myret = os.system(cvscmd)

        # A failed command or a missing module directory aborts the fetch;
        # the (possibly partial) module dir is removed if it is empty.
        if myret != 0 or not os.access(moddir, os.R_OK):
            try:
                os.rmdir(moddir)
            except OSError:
                pass
            raise FetchError(ud.module)

        # tar them up to a defined filename
        if 'fullpath' in ud.parm:
            os.chdir(pkgdir)
            myret = os.system("tar -czf %s %s" % (ud.localpath, localdir))
        else:
            os.chdir(moddir)
            os.chdir('..')
            myret = os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(moddir)))

        if myret != 0:
            # Don't leave a truncated tarball behind on failure.
            try:
                os.unlink(ud.localpath)
            except OSError:
                pass
            raise FetchError(ud.module)
gpl-2.0
petecummings/django
django/contrib/auth/backends.py
468
6114
from __future__ import unicode_literals from django.contrib.auth import get_user_model from django.contrib.auth.models import Permission class ModelBackend(object): """ Authenticates against settings.AUTH_USER_MODEL. """ def authenticate(self, username=None, password=None, **kwargs): UserModel = get_user_model() if username is None: username = kwargs.get(UserModel.USERNAME_FIELD) try: user = UserModel._default_manager.get_by_natural_key(username) if user.check_password(password): return user except UserModel.DoesNotExist: # Run the default password hasher once to reduce the timing # difference between an existing and a non-existing user (#20760). UserModel().set_password(password) def _get_user_permissions(self, user_obj): return user_obj.user_permissions.all() def _get_group_permissions(self, user_obj): user_groups_field = get_user_model()._meta.get_field('groups') user_groups_query = 'group__%s' % user_groups_field.related_query_name() return Permission.objects.filter(**{user_groups_query: user_obj}) def _get_permissions(self, user_obj, obj, from_name): """ Returns the permissions of `user_obj` from `from_name`. `from_name` can be either "group" or "user" to return permissions from `_get_group_permissions` or `_get_user_permissions` respectively. """ if not user_obj.is_active or user_obj.is_anonymous() or obj is not None: return set() perm_cache_name = '_%s_perm_cache' % from_name if not hasattr(user_obj, perm_cache_name): if user_obj.is_superuser: perms = Permission.objects.all() else: perms = getattr(self, '_get_%s_permissions' % from_name)(user_obj) perms = perms.values_list('content_type__app_label', 'codename').order_by() setattr(user_obj, perm_cache_name, set("%s.%s" % (ct, name) for ct, name in perms)) return getattr(user_obj, perm_cache_name) def get_user_permissions(self, user_obj, obj=None): """ Returns a set of permission strings the user `user_obj` has from their `user_permissions`. 
""" return self._get_permissions(user_obj, obj, 'user') def get_group_permissions(self, user_obj, obj=None): """ Returns a set of permission strings the user `user_obj` has from the groups they belong. """ return self._get_permissions(user_obj, obj, 'group') def get_all_permissions(self, user_obj, obj=None): if not user_obj.is_active or user_obj.is_anonymous() or obj is not None: return set() if not hasattr(user_obj, '_perm_cache'): user_obj._perm_cache = self.get_user_permissions(user_obj) user_obj._perm_cache.update(self.get_group_permissions(user_obj)) return user_obj._perm_cache def has_perm(self, user_obj, perm, obj=None): if not user_obj.is_active: return False return perm in self.get_all_permissions(user_obj, obj) def has_module_perms(self, user_obj, app_label): """ Returns True if user_obj has any permissions in the given app_label. """ if not user_obj.is_active: return False for perm in self.get_all_permissions(user_obj): if perm[:perm.index('.')] == app_label: return True return False def get_user(self, user_id): UserModel = get_user_model() try: return UserModel._default_manager.get(pk=user_id) except UserModel.DoesNotExist: return None class RemoteUserBackend(ModelBackend): """ This backend is to be used in conjunction with the ``RemoteUserMiddleware`` found in the middleware module of this package, and is used when the server is handling authentication outside of Django. By default, the ``authenticate`` method creates ``User`` objects for usernames that don't already exist in the database. Subclasses can disable this behavior by setting the ``create_unknown_user`` attribute to ``False``. """ # Create a User object if not already in the database? create_unknown_user = True def authenticate(self, remote_user): """ The username passed as ``remote_user`` is considered trusted. This method simply returns the ``User`` object with the given username, creating a new ``User`` object if ``create_unknown_user`` is ``True``. 
Returns None if ``create_unknown_user`` is ``False`` and a ``User`` object with the given username is not found in the database. """ if not remote_user: return user = None username = self.clean_username(remote_user) UserModel = get_user_model() # Note that this could be accomplished in one try-except clause, but # instead we use get_or_create when creating unknown users since it has # built-in safeguards for multiple threads. if self.create_unknown_user: user, created = UserModel._default_manager.get_or_create(**{ UserModel.USERNAME_FIELD: username }) if created: user = self.configure_user(user) else: try: user = UserModel._default_manager.get_by_natural_key(username) except UserModel.DoesNotExist: pass return user def clean_username(self, username): """ Performs any cleaning on the "username" prior to using it to get or create the user object. Returns the cleaned username. By default, returns the username unchanged. """ return username def configure_user(self, user): """ Configures a user after creation and returns the updated user. By default, returns the user unmodified. """ return user
bsd-3-clause
VagrantApe/flaskMicroblog
venv/lib/python2.7/site-packages/whoosh/filedb/gae.py
96
4872
""" This module contains EXPERIMENTAL support for storing a Whoosh index's files in the Google App Engine blobstore. This will use a lot of RAM since all files are loaded into RAM, but it potentially useful as a workaround for the lack of file storage in Google App Engine. Use at your own risk, but please report any problems to me so I can fix them. To create a new index:: from whoosh.filedb.gae import DatastoreStorage ix = DatastoreStorage().create_index(schema) To open an existing index:: ix = DatastoreStorage().open_index() """ import time from google.appengine.api import memcache # @UnresolvedImport from google.appengine.ext import db # @UnresolvedImport from whoosh.compat import BytesIO from whoosh.index import TOC, FileIndex, _DEF_INDEX_NAME from whoosh.filedb.filestore import ReadOnlyError, Storage from whoosh.filedb.structfile import StructFile class DatastoreFile(db.Model): """A file-like object that is backed by a BytesIO() object whose contents is loaded from a BlobProperty in the app engine datastore. 
""" value = db.BlobProperty() mtime = db.IntegerProperty(default=0) def __init__(self, *args, **kwargs): super(DatastoreFile, self).__init__(*args, **kwargs) self.data = BytesIO() @classmethod def loadfile(cls, name): value = memcache.get(name, namespace="DatastoreFile") if value is None: file = cls.get_by_key_name(name) memcache.set(name, file.value, namespace="DatastoreFile") else: file = cls(value=value) file.data = BytesIO(file.value) return file def close(self): oldvalue = self.value self.value = self.getvalue() if oldvalue != self.value: self.mtime = int(time.time()) self.put() memcache.set(self.key().id_or_name(), self.value, namespace="DatastoreFile") def tell(self): return self.data.tell() def write(self, data): return self.data.write(data) def read(self, length): return self.data.read(length) def seek(self, *args): return self.data.seek(*args) def readline(self): return self.data.readline() def getvalue(self): return self.data.getvalue() class MemcacheLock(object): def __init__(self, name): self.name = name def acquire(self, blocking=False): val = memcache.add(self.name, "L", 360, namespace="whooshlocks") if blocking and not val: # Simulate blocking by retrying the acquire over and over import time while not val: time.sleep(0.1) val = memcache.add(self.name, "", 360, namespace="whooshlocks") return val def release(self): memcache.delete(self.name, namespace="whooshlocks") class DatastoreStorage(Storage): """An implementation of :class:`whoosh.store.Storage` that stores files in the app engine datastore as blob properties. 
""" def create_index(self, schema, indexname=_DEF_INDEX_NAME): if self.readonly: raise ReadOnlyError TOC.create(self, schema, indexname) return FileIndex(self, schema, indexname) def open_index(self, indexname=_DEF_INDEX_NAME, schema=None): return FileIndex(self, schema=schema, indexname=indexname) def list(self): query = DatastoreFile.all() keys = [] for file in query: keys.append(file.key().id_or_name()) return keys def clean(self): pass def total_size(self): return sum(self.file_length(f) for f in self.list()) def file_exists(self, name): return DatastoreFile.get_by_key_name(name) is not None def file_modified(self, name): return DatastoreFile.get_by_key_name(name).mtime def file_length(self, name): return len(DatastoreFile.get_by_key_name(name).value) def delete_file(self, name): memcache.delete(name, namespace="DatastoreFile") return DatastoreFile.get_by_key_name(name).delete() def rename_file(self, name, newname, safe=False): file = DatastoreFile.get_by_key_name(name) newfile = DatastoreFile(key_name=newname) newfile.value = file.value newfile.mtime = file.mtime newfile.put() file.delete() def create_file(self, name, **kwargs): f = StructFile(DatastoreFile(key_name=name), name=name, onclose=lambda sfile: sfile.file.close()) return f def open_file(self, name, *args, **kwargs): return StructFile(DatastoreFile.loadfile(name)) def lock(self, name): return MemcacheLock(name) def temp_storage(self, name=None): tempstore = DatastoreStorage() return tempstore.create()
bsd-3-clause
lzjever/django-guardian
docs/conf.py
24
7168
# -*- coding: utf-8 -*- # # django-guardian documentation build configuration file, created by # sphinx-quickstart on Thu Feb 18 23:18:28 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__)))) os.environ['DJANGO_SETTINGS_MODULE'] = 'guardian.testsettings' ANONYMOUS_USER_ID = -1 # Required by guardian guardian = __import__('guardian') # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'exts'] try: import rst2pdf if rst2pdf.version >= '0.16': extensions.append('rst2pdf.pdfbuilder') except ImportError: print "[NOTE] In order to build PDF you need rst2pdf with version >=0.16" autoclass_content = "both" # Add any paths that contain templates here, relative to this directory. templates_path = ['templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'django-guardian' copyright = u'2010-2012, Lukasz Balcerzak' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. 
# # The short X.Y version. version = guardian.get_version() # The full version, including alpha/beta/rc tags. release = guardian.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. #html_theme = 'default' # Theme URL: https://github.com/coordt/ADCtheme/ html_theme = 'ADCtheme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['theme'] # The name for this set of Sphinx documents. 
If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. 
htmlhelp_basename = 'guardiandoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'guardian.tex', u'guardian Documentation', u'Lukasz Balcerzak', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True pdf_documents = [ ('index', u'django-guardian', u'Documentation for django-guardian', u'Lukasz Balcerzak'), ] pdf_stylesheets = ['sphinx','kerning','a4'] pdf_break_level = 2 pdf_inline_footnotes = True #pdf_extensions = ['vectorpdf', 'dotted_toc']
bsd-2-clause
akuster/yali
yali/gui/ScrAdmin.py
1
6936
# -*- coding: utf-8 -*- # # Copyright (C) 2005-2010 TUBITAK/UEKAE # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation; either version 2 of the License, or (at your option) # any later version. # # Please read the COPYING file. # import pardus.xorg import gettext _ = gettext.translation('yali', fallback=True).ugettext from PyQt4.Qt import QWidget, SIGNAL, QLineEdit, QTimer from pds.thread import PThread from pds.gui import PMessageBox, MIDCENTER, CURRENT, OUT import yali.util import yali.postinstall import yali.storage import yali.context as ctx from yali.gui import ScreenWidget from yali.gui.Ui.rootpasswidget import Ui_RootPassWidget class Widget(QWidget, ScreenWidget): name = "admin" def __init__(self): QWidget.__init__(self) self.ui = Ui_RootPassWidget() self.ui.setupUi(self) self.intf = ctx.interface self.host_valid = True self.pass_valid = False if ctx.flags.install_type == ctx.STEP_DEFAULT: self.pthread = PThread(self, self.startInit, self.dummy) self.pds_messagebox = PMessageBox(self) self.pds_messagebox.enableOverlay() self.connect(self.ui.pass1, SIGNAL("textChanged(const QString &)"), self.slotTextChanged) self.connect(self.ui.pass2, SIGNAL("textChanged(const QString &)"), self.slotTextChanged) self.connect(self.ui.pass2, SIGNAL("returnPressed()"), self.slotReturnPressed) self.connect(self.ui.hostname, SIGNAL("textChanged(const QString &)"), self.slotHostnameChanged) def update(self): if self.host_valid and self.pass_valid: ctx.mainScreen.enableNext() else: ctx.mainScreen.disableNext() def shown(self): if ctx.installData.hostName: self.ui.hostname.setText(str(ctx.installData.hostName)) else: # Use first added user's name as machine name if its exists release_hostname = yali.util.product_release() if self.ui.hostname.text() == '': self.ui.hostname.setText(release_hostname) if ctx.installData.rootPassword: 
self.ui.pass1.setText(ctx.installData.rootPassword) self.ui.pass2.setText(ctx.installData.rootPassword) self.update() self.checkCapsLock() self.ui.pass1.setFocus() def dummy(self): pass def execute(self): ctx.installData.rootPassword = unicode(self.ui.pass1.text()) ctx.installData.hostName = unicode(self.ui.hostname.text()) if ctx.flags.install_type == ctx.STEP_DEFAULT: #FIXME:Refactor dirty code if ctx.storageInitialized: disks = filter(lambda d: not d.format.hidden, ctx.storage.disks) if len(disks) == 1: ctx.storage.clearPartDisks = [disks[0].name] ctx.mainScreen.step_increment = 2 else: ctx.mainScreen.step_increment = 1 else: self.pds_messagebox.setMessage(_("Storage Devices initialising...")) self.pds_messagebox.animate(start=MIDCENTER, stop=MIDCENTER) ctx.mainScreen.step_increment = 0 self.pthread.start() QTimer.singleShot(2, self.startStorageInitialize) return False return True def startInit(self): self.pds_messagebox.animate(start=MIDCENTER, stop=MIDCENTER) def startStorageInitialize(self): ctx.storageInitialized = yali.storage.initialize(ctx.storage, ctx.interface) self.initFinished() def initFinished(self): self.pds_messagebox.animate(start=CURRENT, stop=CURRENT, direction=OUT) disks = filter(lambda d: not d.format.hidden, ctx.storage.disks) if ctx.storageInitialized: if len(disks) == 1: ctx.storage.clearPartDisks = [disks[0].name] ctx.mainScreen.step_increment = 2 else: ctx.mainScreen.step_increment = 1 ctx.mainScreen.slotNext(dry_run=True) else: ctx.mainScreen.enableBack() def setCapsLockIcon(self, child): if type(child) == QLineEdit: if pardus.xorg.capslock.isOn(): child.setStyleSheet("""QLineEdit { background-image: url(:/gui/pics/caps.png); background-repeat: no-repeat; background-position: right; padding-right: 35px; }""") else: child.setStyleSheet("""QLineEdit { background-image: none; padding-right: 0px; }""") def checkCapsLock(self): for child in self.ui.groupBox.children(): self.setCapsLockIcon(child) def keyReleaseEvent(self, event): 
self.checkCapsLock() def slotTextChanged(self): password = str(self.ui.pass1.text()) password_confirm = str(self.ui.pass2.text()) if password and password == password_confirm: if len(password) < 4: self.intf.informationWindow.update(_('Password is too short.'), type="error") self.pass_valid = False else: self.intf.informationWindow.hide() self.pass_valid = True else: self.pass_valid = False if password_confirm: self.intf.informationWindow.update(_('Passwords do not match.'), type="error") if password.lower()=="root" or password_confirm.lower()=="root": self.pass_valid = False if password_confirm: self.intf.informationWindow.update(_('Do not use your username as your password.'), type="error") if self.pass_valid: self.intf.informationWindow.hide() self.update() def slotHostnameChanged(self, hostname): if len(hostname) > 64: self.host_valid = False self.intf.informationWindow.update(_('Hostname cannot be longer than 64 characters.'), type="error") self.update() return if not hostname.toAscii(): self.host_valid = False self.update() return self.host_valid = yali.util.is_text_valid(hostname.toAscii()) if not self.host_valid: self.intf.informationWindow.update(_('Hostname contains invalid characters.'), type="error") else: self.intf.informationWindow.hide() self.update() def slotReturnPressed(self): if ctx.mainScreen.isNextEnabled(): ctx.mainScreen.slotNext()
gpl-2.0
zacernst/python_cypher
test/test.py
1
1583
import unittest import networkx as nx from python_cypher import python_cypher class TestPythonCypher(unittest.TestCase): def test_upper(self): """Test we can parse a CREATE... RETURN query.""" g = nx.MultiDiGraph() query = 'CREATE (n:SOMECLASS) RETURN n' test_parser = python_cypher.CypherToNetworkx() test_parser.query(g, query) def test_create_node(self): """Test we can build a query and create a node""" g = nx.MultiDiGraph() query = 'CREATE (n) RETURN n' test_parser = python_cypher.CypherToNetworkx() for i in test_parser.query(g, query): pass self.assertEqual(len(g.node), 1) def test_create_node_and_edge(self): """Test we can build a query and create two nodes and an edge""" g = nx.MultiDiGraph() query = 'CREATE (n)-->(m) RETURN n, m' test_parser = python_cypher.CypherToNetworkx() for i in test_parser.query(g, query): pass self.assertEqual(len(g.node), 2) self.assertEqual(len(g.edge), 2) def test_return_attribute(self): """Test we can return attribute from matching node""" g = nx.MultiDiGraph() create_query = 'CREATE (n:SOMECLASS {foo: "bar"}) RETURN n' match_query = 'MATCH (n) RETURN n.foo' test_parser = python_cypher.CypherToNetworkx() list(test_parser.query(g, create_query)) out = list(test_parser.query(g, match_query)) # self.assertEqual(out[0], ['bar']) if __name__ == '__main__': unittest.main()
gpl-2.0
asmodehn/pyros
tests/test_pyros/test_ros_ctx_server.py
2
1788
from __future__ import absolute_import, division, print_function import time import pytest from pyros.client import PyrosClient from pyros.server.ctx_server import pyros_ctx pyros_interfaces_ros = pytest.importorskip("pyros_interfaces_ros") # , minversion="0.4") # TODO : make version avialable in pyros_interfaces_ros def testPyrosROSCtx(): # start ros system , before PyrosROS process and Client otherwise Client assumes there is problem ( discovery timeout ) # we might need to load pyros_setup here... try: import pyros_utils except ImportError: # TODO : find a proper way to log from a test like here... try : #_logger.warning("loading pyros_setup and configuring your ROS environment") import pyros_setup # This will load the pyros_setup configuration from the environment pyros_setup.configurable_import().configure().activate() import pyros_utils except ImportError: # This is expected when testing pyros by itself raise nose.SkipTest("pyros_utils could not be imported, and trying to import pyros_setup for dynamic ros setup failed.") master, roscore_proc = pyros_utils.get_master(spawn=True) # we start the master if needed assert master.is_online() with pyros_ctx(node_impl=pyros_interfaces_ros.PyrosROS) as ctx: assert isinstance(ctx.client, PyrosClient) # TODO : assert the context manager does his job ( HOW ? ) if roscore_proc is not None: roscore_proc.terminate() while roscore_proc.is_alive(): time.sleep(0.2) # waiting for roscore to die # Just in case we run this directly if __name__ == '__main__': import pytest pytest.main([ '-s', __file__, ])
bsd-3-clause
zhxwmessi/or-tools
examples/python/production.py
34
2848
# Copyright 2011 Hakan Kjellerstrand hakank@bonetmail.com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Production planning problem in Google or-tools. From the OPL model production.mod. This model was created by Hakan Kjellerstrand (hakank@bonetmail.com) Also see my other Google CP Solver models: http://www.hakank.org/google_or_tools/ """ import sys from ortools.linear_solver import pywraplp def main(sol='GLPK'): # Create the solver. # using GLPK if sol == 'GLPK': solver = pywraplp.Solver('CoinsGridGLPK', pywraplp.Solver.GLPK_LINEAR_PROGRAMMING) else: # Using CLP solver = pywraplp.Solver('CoinsGridCLP', pywraplp.Solver.CLP_LINEAR_PROGRAMMING) # # data # kluski = 0 capellini = 1 fettucine = 2 products = ['kluski', 'capellini', 'fettucine'] num_products = len(products) flour = 0 eggs = 1 resources = ['flour', 'eggs'] num_resources = len(resources) consumption = [[0.5, 0.2], [0.4, 0.4], [0.3, 0.6]] capacity = [20, 40] demand = [100, 200, 300] inside_cost = [0.6, 0.8, 0.3] outside_cost = [0.8, 0.9, 0.4] # # declare variables # inside = [solver.NumVar(0, 10000, 'inside[%i]' % p) for p in range(num_products)] outside = [solver.NumVar(0, 10000, 'outside[%i]' % p) for p in range(num_products)] # to minimize z = solver.Sum([inside_cost[p] * inside[p] + outside_cost[p] * outside[p] for p in range(num_products)]) # # constraints # for r in range(num_resources): solver.Add(solver.Sum( [consumption[p][r] * inside[p] for p in range(num_products)]) <= capacity[r]) for p in 
range(num_products): solver.Add(inside[p] + outside[p] >= demand[p]) objective = solver.Minimize(z) solver.Solve() print print 'z = ', solver.Objective().Value() for p in range(num_products): print products[p], ': inside:', inside[p].SolutionValue(), '(ReducedCost:', inside[p].ReducedCost(), ')', print 'outside:', outside[p].SolutionValue(), ' (ReducedCost:', outside[p].ReducedCost(), ')' print if __name__ == '__main__': sol = 'CBC' if len(sys.argv) > 1: sol = sys.argv[1] if sol != 'GLPK' and sol != 'CBC': print 'Solver must be either GLPK or CBC' sys.exit(1) main(sol)
apache-2.0
p4datasystems/CarnotKE
jyhton/lib-python/2.7/encodings/base64_codec.py
528
2338
""" Python 'base64_codec' Codec - base64 content transfer encoding Unlike most of the other codecs which target Unicode, this codec will return Python string objects for both encode and decode. Written by Marc-Andre Lemburg (mal@lemburg.com). """ import codecs, base64 ### Codec APIs def base64_encode(input,errors='strict'): """ Encodes the object input and returns a tuple (output object, length consumed). errors defines the error handling to apply. It defaults to 'strict' handling which is the only currently supported error handling for this codec. """ assert errors == 'strict' output = base64.encodestring(input) return (output, len(input)) def base64_decode(input,errors='strict'): """ Decodes the object input and returns a tuple (output object, length consumed). input must be an object which provides the bf_getreadbuf buffer slot. Python strings, buffer objects and memory mapped files are examples of objects providing this slot. errors defines the error handling to apply. It defaults to 'strict' handling which is the only currently supported error handling for this codec. 
""" assert errors == 'strict' output = base64.decodestring(input) return (output, len(input)) class Codec(codecs.Codec): def encode(self, input,errors='strict'): return base64_encode(input,errors) def decode(self, input,errors='strict'): return base64_decode(input,errors) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): assert self.errors == 'strict' return base64.encodestring(input) class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): assert self.errors == 'strict' return base64.decodestring(input) class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='base64', encode=base64_encode, decode=base64_decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, )
apache-2.0
kuzetsa/android_kernel_htc_msm8974
Documentation/networking/cxacru-cf.py
14668
1626
#!/usr/bin/env python # Copyright 2009 Simon Arlott # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the Free # Software Foundation; either version 2 of the License, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 59 # Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # Usage: cxacru-cf.py < cxacru-cf.bin # Output: values string suitable for the sysfs adsl_config attribute # # Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110 # contains mis-aligned values which will stop the modem from being able # to make a connection. If the first and last two bytes are removed then # the values become valid, but the modulation will be forced to ANSI # T1.413 only which may not be appropriate. # # The original binary format is a packed list of le32 values. import sys import struct i = 0 while True: buf = sys.stdin.read(4) if len(buf) == 0: break elif len(buf) != 4: sys.stdout.write("\n") sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf))) sys.exit(1) if i > 0: sys.stdout.write(" ") sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0])) i += 1 sys.stdout.write("\n")
gpl-2.0
weimingtom/python-for-android
python-modules/twisted/twisted/internet/test/test_endpoints.py
49
41233
# Copyright (c) 2007-2010 Twisted Matrix Laboratories. # See LICENSE for details. """ Test the C{I...Endpoint} implementations that wrap the L{IReactorTCP}, L{IReactorSSL}, and L{IReactorUNIX} interfaces found in L{twisted.internet.endpoints}. """ from errno import EPERM from zope.interface import implements from twisted.trial import unittest from twisted.internet import error, interfaces from twisted.internet import endpoints from twisted.internet.address import IPv4Address, UNIXAddress from twisted.internet.protocol import ClientFactory, Protocol from twisted.test.proto_helpers import MemoryReactor, RaisingMemoryReactor from twisted.python.failure import Failure from twisted import plugins from twisted.python.modules import getModule from twisted.python.filepath import FilePath pemPath = getModule("twisted.test").filePath.sibling("server.pem") casPath = getModule(__name__).filePath.sibling("fake_CAs") escapedPEMPathName = endpoints.quoteStringArgument(pemPath.path) escapedCAsPathName = endpoints.quoteStringArgument(casPath.path) try: from twisted.test.test_sslverify import makeCertificate from twisted.internet.ssl import CertificateOptions, Certificate, \ KeyPair, PrivateCertificate from OpenSSL.SSL import ContextType testCertificate = Certificate.loadPEM(pemPath.getContent()) testPrivateCertificate = PrivateCertificate.loadPEM(pemPath.getContent()) skipSSL = False except ImportError: skipSSL = "OpenSSL is required to construct SSL Endpoints" class TestProtocol(Protocol): """ Protocol whose only function is to callback deferreds on the factory when it is connected or disconnected. 
""" def __init__(self): self.data = [] self.connectionsLost = [] self.connectionMadeCalls = 0 def connectionMade(self): self.connectionMadeCalls += 1 def dataReceived(self, data): self.data.append(data) def connectionLost(self, reason): self.connectionsLost.append(reason) class TestHalfCloseableProtocol(TestProtocol): """ A Protocol that implements L{IHalfCloseableProtocol} and records that its C{readConnectionLost} and {writeConnectionLost} methods. """ implements(interfaces.IHalfCloseableProtocol) def __init__(self): TestProtocol.__init__(self) self.readLost = False self.writeLost = False def readConnectionLost(self): self.readLost = True def writeConnectionLost(self): self.writeLost = True class TestFactory(ClientFactory): """ Simple factory to be used both when connecting and listening. It contains two deferreds which are called back when my protocol connects and disconnects. """ protocol = TestProtocol class WrappingFactoryTests(unittest.TestCase): """ Test the behaviour of our ugly implementation detail C{_WrappingFactory}. """ def test_failedBuildProtocol(self): """ An exception raised in C{buildProtocol} of our wrappedFactory results in our C{onConnection} errback being fired. """ class BogusFactory(ClientFactory): """ A one off factory whose C{buildProtocol} raises an C{Exception}. """ def buildProtocol(self, addr): raise ValueError("My protocol is poorly defined.") wf = endpoints._WrappingFactory(BogusFactory(), None) wf.buildProtocol(None) d = self.assertFailure(wf._onConnection, ValueError) d.addCallback(lambda e: self.assertEquals( e.args, ("My protocol is poorly defined.",))) return d def test_wrappedProtocolDataReceived(self): """ The wrapped C{Protocol}'s C{dataReceived} will get called when our C{_WrappingProtocol}'s C{dataReceived} gets called. 
""" wf = endpoints._WrappingFactory(TestFactory(), None) p = wf.buildProtocol(None) p.makeConnection(None) p.dataReceived('foo') self.assertEquals(p._wrappedProtocol.data, ['foo']) p.dataReceived('bar') self.assertEquals(p._wrappedProtocol.data, ['foo', 'bar']) def test_wrappedProtocolTransport(self): """ Our transport is properly hooked up to the wrappedProtocol when a connection is made. """ wf = endpoints._WrappingFactory(TestFactory(), None) p = wf.buildProtocol(None) dummyTransport = object() p.makeConnection(dummyTransport) self.assertEquals(p.transport, dummyTransport) self.assertEquals(p._wrappedProtocol.transport, dummyTransport) def test_wrappedProtocolConnectionLost(self): """ Our wrappedProtocol's connectionLost method is called when L{_WrappingProtocol.connectionLost} is called. """ tf = TestFactory() wf = endpoints._WrappingFactory(tf, None) p = wf.buildProtocol(None) p.connectionLost("fail") self.assertEquals(p._wrappedProtocol.connectionsLost, ["fail"]) def test_clientConnectionFailed(self): """ Calls to L{_WrappingFactory.clientConnectionLost} should errback the L{_WrappingFactory._onConnection} L{Deferred} """ wf = endpoints._WrappingFactory(TestFactory(), None) expectedFailure = Failure(error.ConnectError(string="fail")) wf.clientConnectionFailed( None, expectedFailure) errors = [] def gotError(f): errors.append(f) wf._onConnection.addErrback(gotError) self.assertEquals(errors, [expectedFailure]) def test_wrappingProtocolHalfCloseable(self): """ Our L{_WrappingProtocol} should be an L{IHalfCloseableProtocol} if the C{wrappedProtocol} is. """ cd = object() hcp = TestHalfCloseableProtocol() p = endpoints._WrappingProtocol(cd, hcp) self.assertEquals( interfaces.IHalfCloseableProtocol.providedBy(p), True) def test_wrappingProtocolNotHalfCloseable(self): """ Our L{_WrappingProtocol} should not provide L{IHalfCloseableProtocol} if the C{WrappedProtocol} doesn't. 
""" tp = TestProtocol() p = endpoints._WrappingProtocol(None, tp) self.assertEquals( interfaces.IHalfCloseableProtocol.providedBy(p), False) def test_wrappedProtocolReadConnectionLost(self): """ L{_WrappingProtocol.readConnectionLost} should proxy to the wrapped protocol's C{readConnectionLost} """ hcp = TestHalfCloseableProtocol() p = endpoints._WrappingProtocol(None, hcp) p.readConnectionLost() self.assertEquals(hcp.readLost, True) def test_wrappedProtocolWriteConnectionLost(self): """ L{_WrappingProtocol.writeConnectionLost} should proxy to the wrapped protocol's C{writeConnectionLost} """ hcp = TestHalfCloseableProtocol() p = endpoints._WrappingProtocol(None, hcp) p.writeConnectionLost() self.assertEquals(hcp.writeLost, True) class EndpointTestCaseMixin(object): """ Generic test methods to be mixed into all endpoint test classes. """ def retrieveConnectedFactory(self, reactor): """ Retrieve a single factory that has connected using the given reactor. (This behavior is valid for TCP and SSL but needs to be overridden for UNIX.) @param reactor: a L{MemoryReactor} """ return self.expectedClients(reactor)[0][2] def test_endpointConnectSuccess(self): """ A client endpoint can connect and returns a deferred who gets called back with a protocol instance. """ proto = object() mreactor = MemoryReactor() clientFactory = object() ep, expectedArgs, ignoredDest = self.createClientEndpoint( mreactor, clientFactory) d = ep.connect(clientFactory) receivedProtos = [] def checkProto(p): receivedProtos.append(p) d.addCallback(checkProto) factory = self.retrieveConnectedFactory(mreactor) factory._onConnection.callback(proto) self.assertEquals(receivedProtos, [proto]) expectedClients = self.expectedClients(mreactor) self.assertEquals(len(expectedClients), 1) self.assertConnectArgs(expectedClients[0], expectedArgs) def test_endpointConnectFailure(self): """ If an endpoint tries to connect to a non-listening port it gets a C{ConnectError} failure. 
""" expectedError = error.ConnectError(string="Connection Failed") mreactor = RaisingMemoryReactor(connectException=expectedError) clientFactory = object() ep, ignoredArgs, ignoredDest = self.createClientEndpoint( mreactor, clientFactory) d = ep.connect(clientFactory) receivedExceptions = [] def checkFailure(f): receivedExceptions.append(f.value) d.addErrback(checkFailure) self.assertEquals(receivedExceptions, [expectedError]) def test_endpointConnectingCancelled(self): """ Calling L{Deferred.cancel} on the L{Deferred} returned from L{IStreamClientEndpoint.connect} is errbacked with an expected L{ConnectingCancelledError} exception. """ mreactor = MemoryReactor() clientFactory = object() ep, ignoredArgs, address = self.createClientEndpoint( mreactor, clientFactory) d = ep.connect(clientFactory) receivedFailures = [] def checkFailure(f): receivedFailures.append(f) d.addErrback(checkFailure) d.cancel() self.assertEquals(len(receivedFailures), 1) failure = receivedFailures[0] self.assertIsInstance(failure.value, error.ConnectingCancelledError) self.assertEquals(failure.value.address, address) def test_endpointListenSuccess(self): """ An endpoint can listen and returns a deferred that gets called back with a port instance. """ mreactor = MemoryReactor() factory = object() ep, expectedArgs, expectedHost = self.createServerEndpoint( mreactor, factory) d = ep.listen(factory) receivedHosts = [] def checkPortAndServer(port): receivedHosts.append(port.getHost()) d.addCallback(checkPortAndServer) self.assertEquals(receivedHosts, [expectedHost]) self.assertEquals(self.expectedServers(mreactor), [expectedArgs]) def test_endpointListenFailure(self): """ When an endpoint tries to listen on an already listening port, a C{CannotListenError} failure is errbacked. 
""" factory = object() exception = error.CannotListenError('', 80, factory) mreactor = RaisingMemoryReactor(listenException=exception) ep, ignoredArgs, ignoredDest = self.createServerEndpoint( mreactor, factory) d = ep.listen(object()) receivedExceptions = [] def checkFailure(f): receivedExceptions.append(f.value) d.addErrback(checkFailure) self.assertEquals(receivedExceptions, [exception]) def test_endpointConnectNonDefaultArgs(self): """ The endpoint should pass it's connectArgs parameter to the reactor's listen methods. """ factory = object() mreactor = MemoryReactor() ep, expectedArgs, ignoredHost = self.createClientEndpoint( mreactor, factory, **self.connectArgs()) ep.connect(factory) expectedClients = self.expectedClients(mreactor) self.assertEquals(len(expectedClients), 1) self.assertConnectArgs(expectedClients[0], expectedArgs) def test_endpointListenNonDefaultArgs(self): """ The endpoint should pass it's listenArgs parameter to the reactor's listen methods. """ factory = object() mreactor = MemoryReactor() ep, expectedArgs, ignoredHost = self.createServerEndpoint( mreactor, factory, **self.listenArgs()) ep.listen(factory) expectedServers = self.expectedServers(mreactor) self.assertEquals(expectedServers, [expectedArgs]) class TCP4EndpointsTestCase(EndpointTestCaseMixin, unittest.TestCase): """ Tests for TCP Endpoints. """ def expectedServers(self, reactor): """ @return: List of calls to L{IReactorTCP.listenTCP} """ return reactor.tcpServers def expectedClients(self, reactor): """ @return: List of calls to L{IReactorTCP.connectTCP} """ return reactor.tcpClients def assertConnectArgs(self, receivedArgs, expectedArgs): """ Compare host, port, timeout, and bindAddress in C{receivedArgs} to C{expectedArgs}. We ignore the factory because we don't only care what protocol comes out of the C{IStreamClientEndpoint.connect} call. @param receivedArgs: C{tuple} of (C{host}, C{port}, C{factory}, C{timeout}, C{bindAddress}) that was passed to L{IReactorTCP.connectTCP}. 
@param expectedArgs: C{tuple} of (C{host}, C{port}, C{factory}, C{timeout}, C{bindAddress}) that we expect to have been passed to L{IReactorTCP.connectTCP}. """ (host, port, ignoredFactory, timeout, bindAddress) = receivedArgs (expectedHost, expectedPort, _ignoredFactory, expectedTimeout, expectedBindAddress) = expectedArgs self.assertEquals(host, expectedHost) self.assertEquals(port, expectedPort) self.assertEquals(timeout, expectedTimeout) self.assertEquals(bindAddress, expectedBindAddress) def connectArgs(self): """ @return: C{dict} of keyword arguments to pass to connect. """ return {'timeout': 10, 'bindAddress': ('localhost', 49595)} def listenArgs(self): """ @return: C{dict} of keyword arguments to pass to listen """ return {'backlog': 100, 'interface': '127.0.0.1'} def createServerEndpoint(self, reactor, factory, **listenArgs): """ Create an L{TCP4ServerEndpoint} and return the values needed to verify its behaviour. @param reactor: A fake L{IReactorTCP} that L{TCP4ServerEndpoint} can call L{IReactorTCP.listenTCP} on. @param factory: The thing that we expect to be passed to our L{IStreamServerEndpoint.listen} implementation. @param listenArgs: Optional dictionary of arguments to L{IReactorTCP.listenTCP}. """ address = IPv4Address("TCP", "0.0.0.0", 0) if listenArgs is None: listenArgs = {} return (endpoints.TCP4ServerEndpoint(reactor, address.port, **listenArgs), (address.port, factory, listenArgs.get('backlog', 50), listenArgs.get('interface', '')), address) def createClientEndpoint(self, reactor, clientFactory, **connectArgs): """ Create an L{TCP4ClientEndpoint} and return the values needed to verify its behavior. @param reactor: A fake L{IReactorTCP} that L{TCP4ClientEndpoint} can call L{IReactorTCP.connectTCP} on. @param clientFactory: The thing that we expect to be passed to our L{IStreamClientEndpoint.connect} implementation. 
@param connectArgs: Optional dictionary of arguments to L{IReactorTCP.connectTCP} """ address = IPv4Address("TCP", "localhost", 80) return (endpoints.TCP4ClientEndpoint(reactor, address.host, address.port, **connectArgs), (address.host, address.port, clientFactory, connectArgs.get('timeout', 30), connectArgs.get('bindAddress', None)), address) class SSL4EndpointsTestCase(EndpointTestCaseMixin, unittest.TestCase): """ Tests for SSL Endpoints. """ if skipSSL: skip = skipSSL def expectedServers(self, reactor): """ @return: List of calls to L{IReactorSSL.listenSSL} """ return reactor.sslServers def expectedClients(self, reactor): """ @return: List of calls to L{IReactorSSL.connectSSL} """ return reactor.sslClients def assertConnectArgs(self, receivedArgs, expectedArgs): """ Compare host, port, contextFactory, timeout, and bindAddress in C{receivedArgs} to C{expectedArgs}. We ignore the factory because we don't only care what protocol comes out of the C{IStreamClientEndpoint.connect} call. @param receivedArgs: C{tuple} of (C{host}, C{port}, C{factory}, C{contextFactory}, C{timeout}, C{bindAddress}) that was passed to L{IReactorSSL.connectSSL}. @param expectedArgs: C{tuple} of (C{host}, C{port}, C{factory}, C{contextFactory}, C{timeout}, C{bindAddress}) that we expect to have been passed to L{IReactorSSL.connectSSL}. """ (host, port, ignoredFactory, contextFactory, timeout, bindAddress) = receivedArgs (expectedHost, expectedPort, _ignoredFactory, expectedContextFactory, expectedTimeout, expectedBindAddress) = expectedArgs self.assertEquals(host, expectedHost) self.assertEquals(port, expectedPort) self.assertEquals(contextFactory, expectedContextFactory) self.assertEquals(timeout, expectedTimeout) self.assertEquals(bindAddress, expectedBindAddress) def connectArgs(self): """ @return: C{dict} of keyword arguments to pass to connect. 
""" return {'timeout': 10, 'bindAddress': ('localhost', 49595)} def listenArgs(self): """ @return: C{dict} of keyword arguments to pass to listen """ return {'backlog': 100, 'interface': '127.0.0.1'} def setUp(self): """ Set up client and server SSL contexts for use later. """ self.sKey, self.sCert = makeCertificate( O="Server Test Certificate", CN="server") self.cKey, self.cCert = makeCertificate( O="Client Test Certificate", CN="client") self.serverSSLContext = CertificateOptions( privateKey=self.sKey, certificate=self.sCert, requireCertificate=False) self.clientSSLContext = CertificateOptions( requireCertificate=False) def createServerEndpoint(self, reactor, factory, **listenArgs): """ Create an L{SSL4ServerEndpoint} and return the tools to verify its behaviour. @param factory: The thing that we expect to be passed to our L{IStreamServerEndpoint.listen} implementation. @param reactor: A fake L{IReactorSSL} that L{SSL4ServerEndpoint} can call L{IReactorSSL.listenSSL} on. @param listenArgs: Optional dictionary of arguments to L{IReactorSSL.listenSSL}. """ address = IPv4Address("TCP", "0.0.0.0", 0) return (endpoints.SSL4ServerEndpoint(reactor, address.port, self.serverSSLContext, **listenArgs), (address.port, factory, self.serverSSLContext, listenArgs.get('backlog', 50), listenArgs.get('interface', '')), address) def createClientEndpoint(self, reactor, clientFactory, **connectArgs): """ Create an L{SSL4ClientEndpoint} and return the values needed to verify its behaviour. @param reactor: A fake L{IReactorSSL} that L{SSL4ClientEndpoint} can call L{IReactorSSL.connectSSL} on. @param clientFactory: The thing that we expect to be passed to our L{IStreamClientEndpoint.connect} implementation. 
@param connectArgs: Optional dictionary of arguments to L{IReactorSSL.connectSSL} """ address = IPv4Address("TCP", "localhost", 80) if connectArgs is None: connectArgs = {} return (endpoints.SSL4ClientEndpoint(reactor, address.host, address.port, self.clientSSLContext, **connectArgs), (address.host, address.port, clientFactory, self.clientSSLContext, connectArgs.get('timeout', 30), connectArgs.get('bindAddress', None)), address) class UNIXEndpointsTestCase(EndpointTestCaseMixin, unittest.TestCase): """ Tests for UnixSocket Endpoints. """ def retrieveConnectedFactory(self, reactor): """ Override L{EndpointTestCaseMixin.retrieveConnectedFactory} to account for different index of 'factory' in C{connectUNIX} args. """ return self.expectedClients(reactor)[0][1] def expectedServers(self, reactor): """ @return: List of calls to L{IReactorUNIX.listenUNIX} """ return reactor.unixServers def expectedClients(self, reactor): """ @return: List of calls to L{IReactorUNIX.connectUNIX} """ return reactor.unixClients def assertConnectArgs(self, receivedArgs, expectedArgs): """ Compare path, timeout, checkPID in C{receivedArgs} to C{expectedArgs}. We ignore the factory because we don't only care what protocol comes out of the C{IStreamClientEndpoint.connect} call. @param receivedArgs: C{tuple} of (C{path}, C{timeout}, C{checkPID}) that was passed to L{IReactorUNIX.connectUNIX}. @param expectedArgs: C{tuple} of (C{path}, C{timeout}, C{checkPID}) that we expect to have been passed to L{IReactorUNIX.connectUNIX}. """ (path, ignoredFactory, timeout, checkPID) = receivedArgs (expectedPath, _ignoredFactory, expectedTimeout, expectedCheckPID) = expectedArgs self.assertEquals(path, expectedPath) self.assertEquals(timeout, expectedTimeout) self.assertEquals(checkPID, expectedCheckPID) def connectArgs(self): """ @return: C{dict} of keyword arguments to pass to connect. 
""" return {'timeout': 10, 'checkPID': 1} def listenArgs(self): """ @return: C{dict} of keyword arguments to pass to listen """ return {'backlog': 100, 'mode': 0600, 'wantPID': 1} def createServerEndpoint(self, reactor, factory, **listenArgs): """ Create an L{UNIXServerEndpoint} and return the tools to verify its behaviour. @param reactor: A fake L{IReactorUNIX} that L{UNIXServerEndpoint} can call L{IReactorUNIX.listenUNIX} on. @param factory: The thing that we expect to be passed to our L{IStreamServerEndpoint.listen} implementation. @param listenArgs: Optional dictionary of arguments to L{IReactorUNIX.listenUNIX}. """ address = UNIXAddress(self.mktemp()) return (endpoints.UNIXServerEndpoint(reactor, address.name, **listenArgs), (address.name, factory, listenArgs.get('backlog', 50), listenArgs.get('mode', 0666), listenArgs.get('wantPID', 0)), address) def createClientEndpoint(self, reactor, clientFactory, **connectArgs): """ Create an L{UNIXClientEndpoint} and return the values needed to verify its behaviour. @param reactor: A fake L{IReactorUNIX} that L{UNIXClientEndpoint} can call L{IReactorUNIX.connectUNIX} on. @param clientFactory: The thing that we expect to be passed to our L{IStreamClientEndpoint.connect} implementation. @param connectArgs: Optional dictionary of arguments to L{IReactorUNIX.connectUNIX} """ address = UNIXAddress(self.mktemp()) return (endpoints.UNIXClientEndpoint(reactor, address.name, **connectArgs), (address.name, clientFactory, connectArgs.get('timeout', 30), connectArgs.get('checkPID', 0)), address) class ParserTestCase(unittest.TestCase): """ Tests for L{endpoints._parseServer}, the low-level parsing logic. """ f = "Factory" def parse(self, *a, **kw): """ Provide a hook for test_strports to substitute the deprecated API. """ return endpoints._parseServer(*a, **kw) def test_simpleTCP(self): """ Simple strings with a 'tcp:' prefix should be parsed as TCP. 
""" self.assertEquals(self.parse('tcp:80', self.f), ('TCP', (80, self.f), {'interface':'', 'backlog':50})) def test_interfaceTCP(self): """ TCP port descriptions parse their 'interface' argument as a string. """ self.assertEquals( self.parse('tcp:80:interface=127.0.0.1', self.f), ('TCP', (80, self.f), {'interface':'127.0.0.1', 'backlog':50})) def test_backlogTCP(self): """ TCP port descriptions parse their 'backlog' argument as an integer. """ self.assertEquals(self.parse('tcp:80:backlog=6', self.f), ('TCP', (80, self.f), {'interface':'', 'backlog':6})) def test_simpleUNIX(self): """ L{endpoints._parseServer} returns a C{'UNIX'} port description with defaults for C{'mode'}, C{'backlog'}, and C{'wantPID'} when passed a string with the C{'unix:'} prefix and no other parameter values. """ self.assertEquals( self.parse('unix:/var/run/finger', self.f), ('UNIX', ('/var/run/finger', self.f), {'mode': 0666, 'backlog': 50, 'wantPID': True})) def test_modeUNIX(self): """ C{mode} can be set by including C{"mode=<some integer>"}. """ self.assertEquals( self.parse('unix:/var/run/finger:mode=0660', self.f), ('UNIX', ('/var/run/finger', self.f), {'mode': 0660, 'backlog': 50, 'wantPID': True})) def test_wantPIDUNIX(self): """ C{wantPID} can be set to false by included C{"lockfile=0"}. """ self.assertEquals( self.parse('unix:/var/run/finger:lockfile=0', self.f), ('UNIX', ('/var/run/finger', self.f), {'mode': 0666, 'backlog': 50, 'wantPID': False})) def test_escape(self): """ Backslash can be used to escape colons and backslashes in port descriptions. """ self.assertEquals( self.parse(r'unix:foo\:bar\=baz\:qux\\', self.f), ('UNIX', ('foo:bar=baz:qux\\', self.f), {'mode': 0666, 'backlog': 50, 'wantPID': True})) def test_quoteStringArgument(self): """ L{endpoints.quoteStringArgument} should quote backslashes and colons for interpolation into L{endpoints.serverFromString} and L{endpoints.clientFactory} arguments. 
""" self.assertEquals(endpoints.quoteStringArgument("some : stuff \\"), "some \\: stuff \\\\") def test_impliedEscape(self): """ In strports descriptions, '=' in a parameter value does not need to be quoted; it will simply be parsed as part of the value. """ self.assertEquals( self.parse(r'unix:address=foo=bar', self.f), ('UNIX', ('foo=bar', self.f), {'mode': 0666, 'backlog': 50, 'wantPID': True})) def test_nonstandardDefault(self): """ For compatibility with the old L{twisted.application.strports.parse}, the third 'mode' argument may be specified to L{endpoints.parse} to indicate a default other than TCP. """ self.assertEquals( self.parse('filename', self.f, 'unix'), ('UNIX', ('filename', self.f), {'mode': 0666, 'backlog': 50, 'wantPID': True})) def test_unknownType(self): """ L{strports.parse} raises C{ValueError} when given an unknown endpoint type. """ self.assertRaises(ValueError, self.parse, "bogus-type:nothing", self.f) class ServerStringTests(unittest.TestCase): """ Tests for L{twisted.internet.endpoints.serverFromString}. """ def test_tcp(self): """ When passed a TCP strports description, L{endpoints.serverFromString} returns a L{TCP4ServerEndpoint} instance initialized with the values from the string. """ reactor = object() server = endpoints.serverFromString( reactor, "tcp:1234:backlog=12:interface=10.0.0.1") self.assertIsInstance(server, endpoints.TCP4ServerEndpoint) self.assertIdentical(server._reactor, reactor) self.assertEquals(server._port, 1234) self.assertEquals(server._backlog, 12) self.assertEquals(server._interface, "10.0.0.1") def test_ssl(self): """ When passed an SSL strports description, L{endpoints.serverFromString} returns a L{SSL4ServerEndpoint} instance initialized with the values from the string. 
""" reactor = object() server = endpoints.serverFromString( reactor, "ssl:1234:backlog=12:privateKey=%s:" "certKey=%s:interface=10.0.0.1" % (escapedPEMPathName, escapedPEMPathName)) self.assertIsInstance(server, endpoints.SSL4ServerEndpoint) self.assertIdentical(server._reactor, reactor) self.assertEquals(server._port, 1234) self.assertEquals(server._backlog, 12) self.assertEquals(server._interface, "10.0.0.1") ctx = server._sslContextFactory.getContext() self.assertIsInstance(ctx, ContextType) if skipSSL: test_ssl.skip = skipSSL def test_unix(self): """ When passed a UNIX strports description, L{endpoint.serverFromString} returns a L{UNIXServerEndpoint} instance initialized with the values from the string. """ reactor = object() endpoint = endpoints.serverFromString( reactor, "unix:/var/foo/bar:backlog=7:mode=0123:lockfile=1") self.assertIsInstance(endpoint, endpoints.UNIXServerEndpoint) self.assertIdentical(endpoint._reactor, reactor) self.assertEquals(endpoint._address, "/var/foo/bar") self.assertEquals(endpoint._backlog, 7) self.assertEquals(endpoint._mode, 0123) self.assertEquals(endpoint._wantPID, True) def test_implicitDefaultNotAllowed(self): """ The older service-based API (L{twisted.internet.strports.service}) allowed an implicit default of 'tcp' so that TCP ports could be specified as a simple integer, but we've since decided that's a bad idea, and the new API does not accept an implicit default argument; you have to say 'tcp:' now. If you try passing an old implicit port number to the new API, you'll get a C{ValueError}. """ value = self.assertRaises( ValueError, endpoints.serverFromString, None, "4321") self.assertEquals( str(value), "Unqualified strport description passed to 'service'." "Use qualified endpoint descriptions; for example, 'tcp:4321'.") def test_unknownType(self): """ L{endpoints.serverFromString} raises C{ValueError} when given an unknown endpoint type. 
""" value = self.assertRaises( # faster-than-light communication not supported ValueError, endpoints.serverFromString, None, "ftl:andromeda/carcosa/hali/2387") self.assertEquals( str(value), "Unknown endpoint type: 'ftl'") def test_typeFromPlugin(self): """ L{endpoints.serverFromString} looks up plugins of type L{IStreamServerEndpoint} and constructs endpoints from them. """ # Set up a plugin which will only be accessible for the duration of # this test. addFakePlugin(self) # Plugin is set up: now actually test. notAReactor = object() fakeEndpoint = endpoints.serverFromString( notAReactor, "fake:hello:world:yes=no:up=down") from twisted.plugins.fakeendpoint import fake self.assertIdentical(fakeEndpoint.parser, fake) self.assertEquals(fakeEndpoint.args, (notAReactor, 'hello', 'world')) self.assertEquals(fakeEndpoint.kwargs, dict(yes='no', up='down')) def addFakePlugin(testCase, dropinSource="fakeendpoint.py"): """ For the duration of C{testCase}, add a fake plugin to twisted.plugins which contains some sample endpoint parsers. """ import sys savedModules = sys.modules.copy() savedPluginPath = plugins.__path__ def cleanup(): sys.modules.clear() sys.modules.update(savedModules) plugins.__path__[:] = savedPluginPath testCase.addCleanup(cleanup) fp = FilePath(testCase.mktemp()) fp.createDirectory() getModule(__name__).filePath.sibling(dropinSource).copyTo( fp.child(dropinSource)) plugins.__path__.append(fp.path) class ClientStringTests(unittest.TestCase): """ Tests for L{twisted.internet.endpoints.clientFromString}. """ def test_tcp(self): """ When passed a TCP strports description, L{endpointClient} returns a L{TCP4ClientEndpoint} instance initialized with the values from the string. 
""" reactor = object() client = endpoints.clientFromString( reactor, "tcp:host=example.com:port=1234:timeout=7:bindAddress=10.0.0.2") self.assertIsInstance(client, endpoints.TCP4ClientEndpoint) self.assertIdentical(client._reactor, reactor) self.assertEquals(client._host, "example.com") self.assertEquals(client._port, 1234) self.assertEquals(client._timeout, 7) self.assertEquals(client._bindAddress, "10.0.0.2") def test_tcpDefaults(self): """ A TCP strports description may omit I{timeout} or I{bindAddress} to allow the default to be used. """ reactor = object() client = endpoints.clientFromString( reactor, "tcp:host=example.com:port=1234") self.assertEquals(client._timeout, 30) self.assertEquals(client._bindAddress, None) def test_unix(self): """ When passed a UNIX strports description, L{endpointClient} returns a L{UNIXClientEndpoint} instance initialized with the values from the string. """ reactor = object() client = endpoints.clientFromString( reactor, "unix:path=/var/foo/bar:lockfile=1:timeout=9") self.assertIsInstance(client, endpoints.UNIXClientEndpoint) self.assertIdentical(client._reactor, reactor) self.assertEquals(client._path, "/var/foo/bar") self.assertEquals(client._timeout, 9) self.assertEquals(client._checkPID, True) def test_unixDefaults(self): """ A UNIX strports description may omit I{lockfile} or I{timeout} to allow the defaults to be used. """ client = endpoints.clientFromString(object(), "unix:path=/var/foo/bar") self.assertEquals(client._timeout, 30) self.assertEquals(client._checkPID, False) def test_typeFromPlugin(self): """ L{endpoints.clientFromString} looks up plugins of type L{IStreamClientEndpoint} and constructs endpoints from them. 
""" addFakePlugin(self) notAReactor = object() clientEndpoint = endpoints.clientFromString( notAReactor, "cfake:alpha:beta:cee=dee:num=1") from twisted.plugins.fakeendpoint import fakeClient self.assertIdentical(clientEndpoint.parser, fakeClient) self.assertEquals(clientEndpoint.args, ('alpha', 'beta')) self.assertEquals(clientEndpoint.kwargs, dict(cee='dee', num='1')) def test_unknownType(self): """ L{endpoints.serverFromString} raises C{ValueError} when given an unknown endpoint type. """ value = self.assertRaises( # faster-than-light communication not supported ValueError, endpoints.clientFromString, None, "ftl:andromeda/carcosa/hali/2387") self.assertEquals( str(value), "Unknown endpoint type: 'ftl'") class SSLClientStringTests(unittest.TestCase): """ Tests for L{twisted.internet.endpoints.clientFromString} which require SSL. """ if skipSSL: skip = skipSSL def test_ssl(self): """ When passed an SSL strports description, L{clientFromString} returns a L{SSL4ClientEndpoint} instance initialized with the values from the string. 
""" reactor = object() client = endpoints.clientFromString( reactor, "ssl:host=example.net:port=4321:privateKey=%s:" "certKey=%s:bindAddress=10.0.0.3:timeout=3:caCertsDir=%s" % (escapedPEMPathName, escapedPEMPathName, escapedCAsPathName)) self.assertIsInstance(client, endpoints.SSL4ClientEndpoint) self.assertIdentical(client._reactor, reactor) self.assertEquals(client._host, "example.net") self.assertEquals(client._port, 4321) self.assertEquals(client._timeout, 3) self.assertEquals(client._bindAddress, "10.0.0.3") certOptions = client._sslContextFactory self.assertIsInstance(certOptions, CertificateOptions) ctx = certOptions.getContext() self.assertIsInstance(ctx, ContextType) self.assertEquals(Certificate(certOptions.certificate), testCertificate) privateCert = PrivateCertificate(certOptions.certificate) privateCert._setPrivateKey(KeyPair(certOptions.privateKey)) self.assertEquals(privateCert, testPrivateCertificate) expectedCerts = [ Certificate.loadPEM(x.getContent()) for x in [casPath.child("thing1.pem"), casPath.child("thing2.pem")] if x.basename().lower().endswith('.pem') ] self.assertEquals([Certificate(x) for x in certOptions.caCerts], expectedCerts) def test_unreadableCertificate(self): """ If a certificate in the directory is unreadable, L{endpoints._loadCAsFromDir} will ignore that certificate. """ class UnreadableFilePath(FilePath): def getContent(self): data = FilePath.getContent(self) # There is a duplicate of thing2.pem, so ignore anything that # looks like it. 
if data == casPath.child("thing2.pem").getContent(): raise IOError(EPERM) else: return data casPathClone = casPath.child("ignored").parent() casPathClone.clonePath = UnreadableFilePath self.assertEquals( [Certificate(x) for x in endpoints._loadCAsFromDir(casPathClone)], [Certificate.loadPEM(casPath.child("thing1.pem").getContent())]) def test_sslSimple(self): """ When passed an SSL strports description without any extra parameters, L{clientFromString} returns a simple non-verifying endpoint that will speak SSL. """ reactor = object() client = endpoints.clientFromString( reactor, "ssl:host=simple.example.org:port=4321") certOptions = client._sslContextFactory self.assertIsInstance(certOptions, CertificateOptions) self.assertEquals(certOptions.verify, False) ctx = certOptions.getContext() self.assertIsInstance(ctx, ContextType)
apache-2.0
AdrianHuang/rt-thread
bsp/lpc5410x/rtconfig.py
28
2379
"""
SCons build configuration for the RT-Thread LPC5410x BSP.

Selects the cross toolchain (GNU GCC or Keil armcc) and exports the
compiler/linker command names and flags consumed by RT-Thread's build
scripts.  The environment variables RTT_CC and RTT_EXEC_PATH override
the toolchain choice and its install path, respectively.
"""
import os

# toolchains options
ARCH = 'arm'
CPU = 'cortex-m4'
CROSS_TOOL = 'gcc'

BOARD_NAME = 'lpc5410x'

# RTT_CC overrides the default toolchain selection ('gcc' or 'keil').
if os.getenv('RTT_CC'):
    CROSS_TOOL = os.getenv('RTT_CC')

if CROSS_TOOL == 'gcc':
    PLATFORM = 'gcc'
    EXEC_PATH = r'D:/Program Files/CodeSourcery/Sourcery_CodeBench_Lite_for_ARM_EABI/bin'
elif CROSS_TOOL == 'keil':
    PLATFORM = 'armcc'
    EXEC_PATH = 'D:/Keil_v5'
elif CROSS_TOOL == 'iar':
    # IAR is not supported for this BSP.
    print('================ERROR============================')
    print('Not support iar yet!')
    print('=================================================')
    exit(0)
else:
    # Fail fast with a clear message on an unknown toolchain instead of
    # raising a NameError later when PLATFORM is first referenced.
    print('CROSS_TOOL %s is not supported (use gcc or keil)' % CROSS_TOOL)
    exit(0)

# RTT_EXEC_PATH overrides the default toolchain install location above.
if os.getenv('RTT_EXEC_PATH'):
    EXEC_PATH = os.getenv('RTT_EXEC_PATH')

BUILD = 'debug'

if PLATFORM == 'gcc':
    # GNU arm-none-eabi toolchain.
    PREFIX = 'arm-none-eabi-'
    CC = PREFIX + 'gcc'
    CXX = PREFIX + 'g++'
    AS = PREFIX + 'gcc'
    AR = PREFIX + 'ar'
    LINK = PREFIX + 'g++'
    TARGET_EXT = 'elf'
    SIZE = PREFIX + 'size'
    OBJDUMP = PREFIX + 'objdump'
    OBJCPY = PREFIX + 'objcopy'

    DEVICE = ' -mcpu=cortex-m4 -mthumb -ffunction-sections -fdata-sections'
    CFLAGS = DEVICE + ' -g -Wall '
    AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
    # --gc-sections pairs with -ffunction-sections/-fdata-sections above to
    # drop unreferenced code/data at link time.
    LFLAGS = DEVICE + ' -lm -lgcc -lc' + ' -nostartfiles -Wl,--gc-sections,-Map=rtthread-' + BOARD_NAME + '.map,-cref,-u,Reset_Handler -T rtthread-' + BOARD_NAME + '.ld'

    CPATH = ''
    LPATH = ''

    if BUILD == 'debug':
        CFLAGS += ' -O0 -gdwarf-2'
        AFLAGS += ' -gdwarf-2'
    else:
        CFLAGS += ' -O2'

    CXXFLAGS = CFLAGS

    POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'

elif PLATFORM == 'armcc':
    # Keil MDK (armcc) toolchain.
    CC = 'armcc'
    CXX = 'armcc'
    AS = 'armasm'
    AR = 'armar'
    LINK = 'armlink'
    TARGET_EXT = 'axf'

    DEVICE = ' --cpu Cortex-M4.fp'
    CFLAGS = DEVICE + ' --apcs=interwork'
    AFLAGS = DEVICE
    LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread_' + \
             BOARD_NAME + '.map --scatter rtthread-' + BOARD_NAME + '.sct'

    CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
    LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
    CXXFLAGS = CFLAGS

    EXEC_PATH += '/arm/bin40/'

    if BUILD == 'debug':
        CFLAGS += ' -g -O0'
        AFLAGS += ' -g'
    else:
        CFLAGS += ' -O2'

    POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
gpl-2.0
FokkeZB/titanium_mobile
support/common/markdown/preprocessors.py
112
7128
""" PRE-PROCESSORS ============================================================================= Preprocessors work on source text before we start doing anything too complicated. """ import re import markdown HTML_PLACEHOLDER_PREFIX = markdown.STX+"wzxhzdk:" HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%d" + markdown.ETX class Processor: def __init__(self, markdown_instance=None): if markdown_instance: self.markdown = markdown_instance class Preprocessor (Processor): """ Preprocessors are run after the text is broken into lines. Each preprocessor implements a "run" method that takes a pointer to a list of lines of the document, modifies it as necessary and returns either the same pointer or a pointer to a new list. Preprocessors must extend markdown.Preprocessor. """ def run(self, lines): """ Each subclass of Preprocessor should override the `run` method, which takes the document as a list of strings split by newlines and returns the (possibly modified) list of lines. """ pass class HtmlStash: """ This class is used for stashing HTML objects that we extract in the beginning and replace with place-holders. """ def __init__ (self): """ Create a HtmlStash. """ self.html_counter = 0 # for counting inline html segments self.rawHtmlBlocks=[] def store(self, html, safe=False): """ Saves an HTML segment for later reinsertion. Returns a placeholder string that needs to be inserted into the document. 
Keyword arguments: * html: an html segment * safe: label an html segment as safe for safemode Returns : a placeholder string """ self.rawHtmlBlocks.append((html, safe)) placeholder = HTML_PLACEHOLDER % self.html_counter self.html_counter += 1 return placeholder def reset(self): self.html_counter = 0 self.rawHtmlBlocks = [] class HtmlBlockPreprocessor(Preprocessor): """Remove html blocks from the text and store them for later retrieval.""" right_tag_patterns = ["</%s>", "%s>"] def _get_left_tag(self, block): return block[1:].replace(">", " ", 1).split()[0].lower() def _get_right_tag(self, left_tag, block): for p in self.right_tag_patterns: tag = p % left_tag i = block.rfind(tag) if i > 2: return tag.lstrip("<").rstrip(">"), i + len(p)-2 + len(left_tag) return block.rstrip()[-len(left_tag)-2:-1].lower(), len(block) def _equal_tags(self, left_tag, right_tag): if left_tag == 'div' or left_tag[0] in ['?', '@', '%']: # handle PHP, etc. return True if ("/" + left_tag) == right_tag: return True if (right_tag == "--" and left_tag == "--"): return True elif left_tag == right_tag[1:] \ and right_tag[0] != "<": return True else: return False def _is_oneliner(self, tag): return (tag in ['hr', 'hr/']) def run(self, lines): text = "\n".join(lines) new_blocks = [] text = text.split("\n\n") items = [] left_tag = '' right_tag = '' in_tag = False # flag while text: block = text[0] if block.startswith("\n"): block = block[1:] text = text[1:] if block.startswith("\n"): block = block[1:] if not in_tag: if block.startswith("<"): left_tag = self._get_left_tag(block) right_tag, data_index = self._get_right_tag(left_tag, block) if block[1] == "!": # is a comment block left_tag = "--" right_tag, data_index = self._get_right_tag(left_tag, block) # keep checking conditions below and maybe just append if data_index < len(block) \ and markdown.isBlockLevel(left_tag): text.insert(0, block[data_index:]) block = block[:data_index] if not (markdown.isBlockLevel(left_tag) \ or block[1] in ["!", "?", 
"@", "%"]): new_blocks.append(block) continue if self._is_oneliner(left_tag): new_blocks.append(block.strip()) continue if block.rstrip().endswith(">") \ and self._equal_tags(left_tag, right_tag): new_blocks.append( self.markdown.htmlStash.store(block.strip())) continue else: #if not block[1] == "!": # if is block level tag and is not complete if markdown.isBlockLevel(left_tag) or left_tag == "--" \ and not block.rstrip().endswith(">"): items.append(block.strip()) in_tag = True else: new_blocks.append( self.markdown.htmlStash.store(block.strip())) continue new_blocks.append(block) else: items.append(block.strip()) right_tag, data_index = self._get_right_tag(left_tag, block) if self._equal_tags(left_tag, right_tag): # if find closing tag in_tag = False new_blocks.append( self.markdown.htmlStash.store('\n\n'.join(items))) items = [] if items: new_blocks.append(self.markdown.htmlStash.store('\n\n'.join(items))) new_blocks.append('\n') new_text = "\n\n".join(new_blocks) return new_text.split("\n") class ReferencePreprocessor(Preprocessor): """ Remove reference definitions from text and store for later use. """ RE = re.compile(r'^(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)$', re.DOTALL) def run (self, lines): new_text = []; for line in lines: m = self.RE.match(line) if m: id = m.group(2).strip().lower() t = m.group(4).strip() # potential title if not t: self.markdown.references[id] = (m.group(3), t) elif (len(t) >= 2 and (t[0] == t[-1] == "\"" or t[0] == t[-1] == "\'" or (t[0] == "(" and t[-1] == ")") ) ): self.markdown.references[id] = (m.group(3), t[1:-1]) else: new_text.append(line) else: new_text.append(line) return new_text #+ "\n"
apache-2.0
NL66278/OCB
addons/sales_team/res_config.py
366
1922
# -*- coding: utf-8 -*- from openerp.osv import fields, osv class sales_team_configuration(osv.TransientModel): _name = 'sale.config.settings' _inherit = ['sale.config.settings'] def set_group_multi_salesteams(self, cr, uid, ids, context=None): """ This method is automatically called by res_config as it begins with set. It is used to implement the 'one group or another' behavior. We have to perform some group manipulation by hand because in res_config.execute(), set_* methods are called after group_*; therefore writing on an hidden res_config file could not work. If group_multi_salesteams is checked: remove group_mono_salesteams from group_user, remove the users. Otherwise, just add group_mono_salesteams in group_user. The inverse logic about group_multi_salesteams is managed by the normal behavior of 'group_multi_salesteams' field. """ def ref(xml_id): mod, xml = xml_id.split('.', 1) return self.pool['ir.model.data'].get_object(cr, uid, mod, xml, context) for obj in self.browse(cr, uid, ids, context=context): config_group = ref('base.group_mono_salesteams') base_group = ref('base.group_user') if obj.group_multi_salesteams: base_group.write({'implied_ids': [(3, config_group.id)]}) config_group.write({'users': [(3, u.id) for u in base_group.users]}) else: base_group.write({'implied_ids': [(4, config_group.id)]}) return True _columns = { 'group_multi_salesteams': fields.boolean("Organize Sales activities into multiple Sales Teams", implied_group='base.group_multi_salesteams', help="""Allows you to use Sales Teams to manage your leads and opportunities."""), }
agpl-3.0
chrislit/abydos
tests/distance/test_distance_lcsuffix.py
1
3807
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.

"""abydos.tests.distance.test_distance_lcsuffix.

This module contains unit tests for abydos.distance.LCSuffix
"""

import unittest

from abydos.distance import LCSuffix


class LCSuffixTestCases(unittest.TestCase):
    """Test LCSuffix functions.

    abydos.distance.LCSuffix
    """

    cmp = LCSuffix()

    def test_lcsuffix_sim(self):
        """Test abydos.distance.LCSuffix.sim."""
        # Base cases with exact expected similarities.
        for src, tar, want in (
            ('', '', 1.0),
            ('a', '', 0.0),
            ('', 'a', 0.0),
            ('abc', '', 0.0),
            ('', 'abc', 0.0),
            ('abc', 'abc', 1.0),
            ('abcd', 'efgh', 0.0),
        ):
            self.assertEqual(self.cmp.sim(src, tar), want)

        # Near-match pairs, checked to floating-point tolerance.
        for src, tar, want in (
            ('Nigel', 'Niall', 0.2),
            ('Niall', 'Nigel', 0.2),
            ('Colin', 'Coiln', 0.2),
            ('Coiln', 'Colin', 0.2),
            ('ATCAACGAGT', 'AACGATTAG', 0.0),
        ):
            self.assertAlmostEqual(self.cmp.sim(src, tar), want)

    def test_lcsuffix_dist(self):
        """Test abydos.distance.LCSuffix.dist."""
        # Base cases with exact expected distances.
        for src, tar, want in (
            ('', '', 0.0),
            ('a', '', 1.0),
            ('', 'a', 1.0),
            ('abc', '', 1.0),
            ('', 'abc', 1.0),
            ('abc', 'abc', 0.0),
            ('abcd', 'efgh', 1.0),
        ):
            self.assertEqual(self.cmp.dist(src, tar), want)

        # Near-match pairs, checked to floating-point tolerance.
        for src, tar, want in (
            ('Nigel', 'Niall', 0.8),
            ('Niall', 'Nigel', 0.8),
            ('Colin', 'Coiln', 0.8),
            ('Coiln', 'Colin', 0.8),
            ('ATCAACGAGT', 'AACGATTAG', 1.0),
        ):
            self.assertAlmostEqual(self.cmp.dist(src, tar), want)

    def test_lcsuffix_dist_abs(self):
        """Test abydos.distance.LCSuffix.dist_abs."""
        # Base cases: absolute longest-common-suffix lengths.
        for src, tar, want in (
            ('', '', 0),
            ('a', '', 0),
            ('', 'a', 0),
            ('abc', '', 0),
            ('', 'abc', 0),
            ('abc', 'abc', 3),
            ('abcd', 'efgh', 0),
        ):
            self.assertEqual(self.cmp.dist_abs(src, tar), want)

        for src, tar, want in (
            ('Nigel', 'Niall', 1),
            ('Niall', 'Nigel', 1),
            ('Colin', 'Coiln', 1),
            ('Coiln', 'Colin', 1),
            ('ATCAACGAGT', 'AACGATTAG', 0),
        ):
            self.assertAlmostEqual(self.cmp.dist_abs(src, tar), want)

        # Three-string form accepts an extra string argument...
        self.assertAlmostEqual(self.cmp.dist_abs('Nigel', 'Niall', 'Niel'), 1)
        # ...but a non-string third argument must raise TypeError.
        with self.assertRaises(TypeError):
            self.cmp.dist_abs('Nigel', 'Niall', 5)


if __name__ == '__main__':
    unittest.main()
gpl-3.0