repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
pekeler/arangodb
3rdParty/V8-4.3.61/tools/generate-builtins-tests.py
89
4569
#!/usr/bin/env python # Copyright 2014 the V8 project authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import json import optparse import os import random import shutil import subprocess import sys BLACKLIST = [ # Skip special d8 functions. "load", "os", "print", "read", "readline", "quit" ] def GetRandomObject(): return random.choice([ "0", "1", "2.5", "0x1000", "\"string\"", "{foo: \"bar\"}", "[1, 2, 3]", "function() { return 0; }" ]) g_var_index = 0 def GetVars(result, num, first = []): global g_var_index variables = [] for i in range(num): variables.append("__v_%d" % g_var_index) g_var_index += 1 for var in variables: result.append("var %s = %s;" % (var, GetRandomObject())) return ", ".join(first + variables) # Wraps |string| in try..catch. def TryCatch(result, string, exception_behavior = ""): result.append("try { %s } catch(e) { %s }" % (string, exception_behavior)) def BuildTests(function, full_name, options): assert function["type"] == "function" global g_var_index g_var_index = 0 result = ["// AUTO-GENERATED BY tools/generate-builtins-tests.py.\n"] result.append("// Function call test:") length = function["length"] TryCatch(result, "%s(%s);" % (full_name, GetVars(result, length))) if "prototype" in function: proto = function["prototype"] result.append("\n// Constructor test:") TryCatch(result, "var recv = new %s(%s);" % (full_name, GetVars(result, length)), "var recv = new Object();") getters = [] methods = [] for prop in proto: proto_property = proto[prop] proto_property_type = proto_property["type"] if proto_property_type == "getter": getters.append(proto_property) result.append("recv.__defineGetter__(\"%s\", " "function() { return %s; });" % (proto_property["name"], GetVars(result, 1))) if proto_property_type == "number": result.append("recv.__defineGetter__(\"%s\", " "function() { return %s; });" % (proto_property["name"], GetVars(result, 1))) if proto_property_type == 
"function": methods.append(proto_property) if getters: result.append("\n// Getter tests:") for getter in getters: result.append("print(recv.%s);" % getter["name"]) if methods: result.append("\n// Method tests:") for method in methods: args = GetVars(result, method["length"], ["recv"]) call = "%s.prototype.%s.call(%s)" % (full_name, method["name"], args) TryCatch(result, call) filename = os.path.join(options.outdir, "%s.js" % (full_name)) with open(filename, "w") as f: f.write("\n".join(result)) f.write("\n") def VisitObject(obj, path, options): obj_type = obj["type"] obj_name = "%s%s" % (path, obj["name"]) if obj_type == "function": BuildTests(obj, obj_name, options) if "properties" in obj: for prop_name in obj["properties"]: prop = obj["properties"][prop_name] VisitObject(prop, "%s." % (obj_name), options) def ClearGeneratedFiles(options): if os.path.exists(options.outdir): shutil.rmtree(options.outdir) def GenerateTests(options): ClearGeneratedFiles(options) # Re-generate everything. output = subprocess.check_output( "%s %s" % (options.d8, options.script), shell=True).strip() objects = json.loads(output) os.makedirs(options.outdir) for obj_name in objects: if obj_name in BLACKLIST: continue obj = objects[obj_name] VisitObject(obj, "", options) def BuildOptions(): result = optparse.OptionParser() result.add_option("--d8", help="d8 binary to use", default="out/ia32.release/d8") result.add_option("--outdir", help="directory where to place generated tests", default="test/mjsunit/builtins-gen") result.add_option("--script", help="builtins detector script to run in d8", default="tools/detect-builtins.js") return result def Main(): parser = BuildOptions() (options, args) = parser.parse_args() if len(args) != 1 or args[0] == "help": parser.print_help() return 1 action = args[0] if action == "generate": GenerateTests(options) return 0 if action == "clear": ClearGeneratedFiles(options) return 0 print("Unknown action: %s" % action) parser.print_help() return 1 if __name__ 
== "__main__": sys.exit(Main())
apache-2.0
sungtaek/django-fhossapi
fhossapi/models.py
1
18739
from django.db import models # Create your models here class Imsu(models.Model): id = models.IntegerField(db_column='id', primary_key=True, editable=False) name = models.CharField(db_column='name', max_length=255, unique=True) scscf_name = models.CharField(db_column='scscf_name', max_length=255, null=True, blank=True) diameter_name = models.CharField(db_column='diameter_name', max_length=255, null=True, default='', blank=True) capa_set = models.ForeignKey('CapabilitiesSet', db_column='id_capabilities_set', to_field='id_set', null=True, editable=False) pref_scscf_set= models.ForeignKey('PreferredScscfSet', db_column='id_preferred_scscf_set', to_field='id_set', null=True, editable=False) def dict(self, detail=False): val = {} val['name'] = self.name if detail: val['scscf_name'] = self.scscf_name val['diameter_name'] = self.diameter_name if self.capa_set: val['capa_set'] = self.capa_set.name if self.pref_scscf_set: val['pref_scscf_set'] = self.pref_scscf_set.name val['impi'] = [] for impi in self.impis.all(): val['impi'].append(impi.dict(detail)) return val class Meta: db_table = 'imsu' managed = False class Impi(models.Model): DIGEST_AKAV1_MD5 = 1 DIGEST_AKAV2_MD5 = 2 DIGEST_MD5 = 4 DIGEST = 8 HTTP_DIGEST_MD5 = 16 EARLY_IMS_SECURITY = 32 NASS_BUNDLED = 64 SIP_DIGEST = 128 AUTH_CHOICE = ( (DIGEST_AKAV1_MD5, 'Digest-AKAv1-MD5'), (DIGEST_AKAV2_MD5, 'Digest-AKAv2-MD5'), (DIGEST_MD5, 'Digest-MD5'), (DIGEST, 'Digest'), (HTTP_DIGEST_MD5, 'HTTP-Digest-MD5'), (EARLY_IMS_SECURITY, 'Ealry-IMS-Security'), (NASS_BUNDLED, 'NASS-Bundled'), (SIP_DIGEST, 'SIP-Digest'), ) id = models.IntegerField(db_column='id', primary_key=True, editable=False) imsu = models.ForeignKey('Imsu', db_column='id_imsu', related_name='impis', editable=False) identity = models.CharField(db_column='identity', max_length=255, unique=True) secret_key = models.BinaryField(db_column='k') avail_auth = models.IntegerField(db_column='auth_scheme', default=129) def_auth = 
models.IntegerField(db_column='default_auth_scheme', choices=AUTH_CHOICE, default=SIP_DIGEST) amf = models.BinaryField(db_column='amf', default='0000') op = models.BinaryField(db_column='op', default='00000000000000000000000000000000') sqn = models.CharField(db_column='sqn', max_length=64, default='000000000000') early_ims_ip= models.CharField(db_column='ip', max_length=64, default='') dsl_line_id = models.CharField(db_column='line_identifier', max_length=64, default='') zh_uicc_type= models.IntegerField(db_column='zh_uicc_type', null=True, default=0) zh_key_life_time= models.IntegerField(db_column='zh_key_life_time', null=True, default=3600) zh_def_auth = models.IntegerField(db_column='zh_default_auth_scheme', choices=AUTH_CHOICE, default=SIP_DIGEST) def dict(self, detail=False): val = {} val['identity'] = self.identity if detail: val['secret_key'] = self.secret_key val['avail_auth'] = [] for auth in self.AUTH_CHOICE: if auth[0] & self.avail_auth: val['avail_auth'].append(auth[1]) val['def_auth'] = self.get_def_auth_display() if detail: val['amf'] = self.amf val['op'] = self.op val['sqn'] = self.sqn val['early_ims_ip'] = self.early_ims_ip val['dsl_line_id'] = self.dsl_line_id val['zh_uicc_type'] = self.zh_uicc_type val['zh_key_life_time'] = self.zh_key_life_time val['zh_def_auth'] = self.get_zh_def_auth_display() val['impu'] = [] for impu in self.impus.all(): val['impu'].append(impu.dict(detail)) return val class Meta: db_table = 'impi' managed = False class ImpiImpu(models.Model): id = models.IntegerField(db_column='id', primary_key=True, editable=False) impi = models.ForeignKey('Impi', db_column='id_impi', editable=False) impu = models.ForeignKey('Impu', db_column='id_impu', editable=False) user_status = models.IntegerField(db_column='user_state', default=0) class Meta: db_table = 'impi_impu' managed = False class Impu(models.Model): PUBLIC_USER_IDENTITY = 0 DISTINCT_PSI = 1 WILDCARDED_PSI = 2 IMPU_TYPE_CHOICE = ( (PUBLIC_USER_IDENTITY, 'Public User Identity'), 
(DISTINCT_PSI, 'Distinct PSI'), (WILDCARDED_PSI, 'Wildcarted PSI'), ) NOT_REGISTERED = 0 REGISTERED = 1 UNREGISTERED = 2 AUTH_PENDING = 3 USER_STATUS_CHOICE = ( (NOT_REGISTERED, 'Not Registered'), (REGISTERED, 'Registered'), (UNREGISTERED, 'Unregistered'), (AUTH_PENDING, 'Auth Pending'), ) id = models.IntegerField(db_column='id', primary_key=True, editable=False) identity = models.CharField(db_column='identity', max_length=255, unique=True) impu_type = models.IntegerField(db_column='type', choices=IMPU_TYPE_CHOICE, default=PUBLIC_USER_IDENTITY) barring = models.BooleanField(db_column='barring', default=False) user_status = models.IntegerField(db_column='user_state', choices=USER_STATUS_CHOICE, default=NOT_REGISTERED) service_profile=models.ForeignKey('ServiceProfile', db_column='id_sp', null=True, editable=False) implicit_set= models.IntegerField(db_column='id_implicit_set', default=-1, editable=False) charging_set=models.ForeignKey('ChargingSet', db_column='id_charging_info', null=True, editable=False) wildcard_psi= models.CharField(db_column='wildcard_psi', max_length=255, default='') display_name= models.CharField(db_column='display_name', max_length=255, default='') psi_activation= models.BooleanField(db_column='psi_activation', default=False) can_register= models.BooleanField(db_column='can_register', default=True) impis = models.ManyToManyField('Impi', through='ImpiImpu', related_name='impus', editable=False) def dict(self, detail=False): val = {} val['identity'] = self.identity if detail: val['impu_type'] = self.get_impu_type_display() val['barring'] = self.barring val['user_status'] = self.get_user_status_display() if self.service_profile: val['service_profile'] = self.service_profile.name if detail: if self.charging_set: val['charging_set'] = self.charging_set.name val['wildcard_psi'] = self.wildcard_psi val['display_name'] = self.display_name val['psi_activation'] = self.psi_activation val['can_register'] = self.can_register return val class Meta: 
db_table = 'impu' managed = False class ServiceProfile(models.Model): id = models.IntegerField(db_column='id', primary_key=True, editable=False) name = models.CharField(db_column='name', max_length=255, unique=True) cn_service_auth= models.IntegerField(db_column='cn_service_auth', null=True, default=0) def dict(self, detail=False): val = {} val['name'] = self.name val['ifc'] = [] for ifc in self.ifcs.all(): val['ifc'].append(ifc.dict(detail)) return val class Meta: db_table = 'sp' managed = False class ServiceProfileIfc(models.Model): id = models.IntegerField(db_column='id', primary_key=True, editable=False) id_sp = models.ForeignKey('ServiceProfile', db_column='id_sp', editable=False) id_ifc = models.ForeignKey('Ifc', db_column='id_ifc', editable=False) priority = models.IntegerField(db_column='priority', default=0) class Meta: db_table = 'sp_ifc' managed = False class Ifc(models.Model): ANY = -1 REGISTERED = 0 UNREGISTERED= 1 PROFILE_CHOICE = ( (ANY, 'Any'), (REGISTERED, 'Registered'), (UNREGISTERED, 'Unregistered'), ) id = models.IntegerField(db_column='id', primary_key=True, editable=False) name = models.CharField(db_column='name', max_length=255, unique=True) application_server=models.ForeignKey('ApplicationServer', db_column='id_application_server', editable=False) trigger_point=models.ForeignKey('TriggerPoint', db_column='id_tp', editable=False) profile_part_indicator=models.IntegerField(db_column='profile_part_ind', choices=PROFILE_CHOICE, default=ANY) service_profiles = models.ManyToManyField('ServiceProfile', through='ServiceProfileIfc', related_name='ifcs', editable=False) def dict(self, detail=False): val = {} val['name'] = self.name val['application_server'] = self.application_server.dict(detail) val['trigger_point'] = self.trigger_point.dict(detail) if detail: val['profile_part_indicator'] = self.get_profile_part_indicator_display() return val class Meta: db_table = 'ifc' managed = False class ApplicationServer(models.Model): SESSION_CONTINUE = 0 
SESSION_TERMINATED = 1 DEFAULT_HANDLING_CHOICE = ( (SESSION_CONTINUE, 'Session Continue'), (SESSION_TERMINATED, 'Session Terminated'), ) id = models.IntegerField(db_column='id', primary_key=True, editable=False) name = models.CharField(db_column='name', max_length=255, unique=True) server_name = models.CharField(db_column='server_name', max_length=255, unique=True) default_handling= models.IntegerField(db_column='default_handling', choices=DEFAULT_HANDLING_CHOICE, default=SESSION_CONTINUE) service_info= models.CharField(db_column='service_info', max_length=255, default='') diameter_fqdn= models.CharField(db_column='diameter_address', max_length=255, unique=True) rep_data_limit= models.IntegerField(db_column='rep_data_size_limit', default=1024) udr_allow = models.BooleanField(db_column='udr', default=True) pur_allow = models.BooleanField(db_column='pur', default=True) snr_allow = models.BooleanField(db_column='snr', default=True) include_regi_response=models.BooleanField(db_column='include_register_response', default=False) include_regi_request=models.BooleanField(db_column='include_register_request', default=False) def dict(self, detail=False): val = {} val['name'] = self.name val['server_name'] = self.server_name if detail: val['default_handling'] = self.get_default_handling_display() val['service_info'] = self.service_info val['diameter_fqdn'] = self.diameter_fqdn val['rep_data_limit'] = self.rep_data_limit val['udr_allow'] = self.udr_allow val['pur_allow'] = self.pur_allow val['snr_allow'] = self.snr_allow val['include_regi_response'] = self.include_regi_response val['include_regi_request'] = self.include_regi_request return val class Meta: db_table = 'application_server' managed = False class TriggerPoint(models.Model): CONDITION_TYPE_DNF = 0 CONDITION_TYPE_CNF = 1 CONDITION_TYPE_CHOICE = ( (CONDITION_TYPE_DNF, 'Disjunctive Normal Format'), (CONDITION_TYPE_CNF, 'Conjunctive Normal Format'), ) id = models.IntegerField(db_column='id', primary_key=True, 
editable=False) name = models.CharField(db_column='name', max_length=255, unique=True) condition_type = models.IntegerField(db_column='condition_type_cnf', choices=CONDITION_TYPE_CHOICE, default=CONDITION_TYPE_DNF) def dict(self, detail=False): val = {} val['name'] = self.name val['condition_type'] = self.get_condition_type_display() if detail: val['spt'] = [] for spt in self.spts.all(): val['spt'].append(spt.dict(detail)) return val class Meta: db_table = 'tp' managed = False class Spt(models.Model): TYPE_REQUEST_URI = 0 TYPE_METHOD = 1 TYPE_SIP_HEADER = 2 TYPE_SESSION_CASE = 3 TYPE_SDP_LINE = 4 TYPE_CHOICE = ( (TYPE_REQUEST_URI, 'Request-URI'), (TYPE_METHOD, 'SIP Method'), (TYPE_SIP_HEADER, 'SIP Header'), (TYPE_SESSION_CASE, 'Session Case'), (TYPE_SDP_LINE, 'Session Description'), ) METHOD_INVITE = 'INVITE' METHOD_REGISTER = 'REGISTER' METHOD_CANCEL = 'CANCEL' METHOD_OPTION = 'OPTION' METHOD_PUBLISH = 'PUBLISH' METHOD_SUBSCRIBE = 'SUBSCRIBE' METHOD_MESSAGE = 'MESSAGE' METHOD_INFO = 'INFO' METHOD_REFER = 'REFER' METHOD_CHOICE = ( (METHOD_INVITE, 'INVITE'), (METHOD_REGISTER, 'REGISTER'), (METHOD_CANCEL, 'CANCEL'), (METHOD_OPTION, 'OPTION'), (METHOD_PUBLISH, 'PUBLISH'), (METHOD_SUBSCRIBE, 'SUBSCRIBE'), (METHOD_MESSAGE, 'MESSAGE'), (METHOD_INFO, 'INFO'), (METHOD_REFER, 'REFER'), ) SESSION_CASE_ORIGIN = 0 SESSION_CASE_TERM_REG = 1 SESSION_CASE_TERM_UNREG = 2 SESSION_CASE_ORIGIN_UNREG= 3 SESSION_CASE_ORIGIN_CDIV= 4 SESSION_CASE_CHOICE = ( (SESSION_CASE_ORIGIN, 'Origin-Session'), (SESSION_CASE_TERM_REG, 'Term-Reg'), (SESSION_CASE_TERM_UNREG, 'Term-UnReg'), (SESSION_CASE_ORIGIN_UNREG, 'Origin-UnReg'), (SESSION_CASE_ORIGIN_CDIV, 'Origin-Cdiv'), ) ACTIVE_REG = 1 ACTIVE_REREG = 2 ACTIVE_DEREG = 4 ACTIVE_CHOICE = ( (ACTIVE_REG, 'Reg'), (ACTIVE_REREG, 'ReReg'), (ACTIVE_DEREG, 'DeReg'), ) id = models.IntegerField(db_column='id', primary_key=True, editable=False) trigger_point= models.ForeignKey('TriggerPoint', db_column='id_tp', related_name='spts', editable=False) 
condition_nagated=models.BooleanField(db_column='condition_negated', default=False) group = models.IntegerField(db_column='grp', default=0) type = models.IntegerField(db_column='type', choices=TYPE_CHOICE, default=TYPE_REQUEST_URI) requesturi = models.CharField(db_column='requesturi', max_length=255, null=True) method = models.CharField(db_column='method', max_length=255, choices=METHOD_CHOICE, default=METHOD_INVITE, null=True) header = models.CharField(db_column='header', max_length=255, null=True) header_content= models.CharField(db_column='header_content', max_length=255, null=True) session_case= models.IntegerField(db_column='session_case', null=True, choices=SESSION_CASE_CHOICE, default=SESSION_CASE_ORIGIN) sdp_line = models.CharField(db_column='sdp_line', max_length=255, null=True) sdp_content = models.CharField(db_column='sdp_line_content', max_length=255, null=True) regi_type = models.IntegerField(db_column='registration_type', null=True, default=0) def dict(self, detail=False): val = {} if detail: val['condition_nagated'] = self.condition_nagated val['group'] = self.group val['type'] = self.get_type_display() if self.type == self.TYPE_REQUEST_URI: val['value'] = self.requesturi elif self.type == self.TYPE_METHOD: val['value'] = self.method if self.method == self.METHOD_REGISTER: val['active'] = [] for active in self.ACTIVE_CHOICE: if active[0] & self.regi_type: val['active'].append(active[1]) elif self.type == self.TYPE_SIP_HEADER: val['value'] = {'header': self.header, 'content': self.header_content} elif self.type == self.TYPE_SESSION_CASE: val['value'] = self.get_session_case_display() elif self.type == self.TYPE_SDP_LINE: val['value'] = {'line': self.sdp_line, 'content': self.sdp_content} return val class Meta: db_table = 'spt' managed = False class CapabilitiesSet(models.Model): id = models.IntegerField(db_column='id', primary_key=True, editable=False) id_set = models.IntegerField(db_column='id_set', unique=True) name = 
models.CharField(db_column='name', max_length=255, unique=True) id_capability= models.IntegerField(db_column='id_capability', unique=True) is_mandatory= models.BooleanField(db_column='is_mandatory', default=False) class Meta: db_table = 'capabilities_set' managed = False class PreferredScscfSet(models.Model): id = models.IntegerField(db_column='id', primary_key=True, editable=False) id_set = models.IntegerField(db_column='id_set', unique=True) name = models.CharField(db_column='name', max_length=255, unique=True) scscf_name = models.CharField(db_column='scscf_name', max_length=255) priority = models.IntegerField(db_column='priority', unique=True) class Meta: db_table = 'preferred_scscf_set' managed = False class ChargingSet(models.Model): id = models.IntegerField(db_column='id', primary_key=True, editable=False) name = models.CharField(db_column='name', max_length=255, unique=True) pri_ecf = models.CharField(db_column='pri_ecf', max_length=255) sec_ecf = models.CharField(db_column='sec_ecf', max_length=255) pri_ccf = models.CharField(db_column='pri_ccf', max_length=255) sec_ccf = models.CharField(db_column='sec_ccf', max_length=255) class Meta: db_table = 'charging_info' managed = False
apache-2.0
xingyepei/edx-platform
common/djangoapps/student/tests/test_certificates.py
23
5682
"""Tests for display of certificates on the student dashboard. """ import unittest import ddt from django.conf import settings from django.core.urlresolvers import reverse from mock import patch from django.test.utils import override_settings from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory from student.tests.factories import UserFactory, CourseEnrollmentFactory from certificates.tests.factories import GeneratedCertificateFactory # pylint: disable=import-error from certificates.api import get_certificate_url # pylint: disable=import-error from course_modes.models import CourseMode # pylint: disable=no-member @ddt.ddt @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') class CertificateDisplayTest(ModuleStoreTestCase): """Tests display of certificates on the student dashboard. """ USERNAME = "test_user" PASSWORD = "password" DOWNLOAD_URL = "http://www.example.com/certificate.pdf" def setUp(self): super(CertificateDisplayTest, self).setUp() self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD) result = self.client.login(username=self.USERNAME, password=self.PASSWORD) self.assertTrue(result, msg="Could not log in") self.course = CourseFactory() self.course.certificates_display_behavior = "early_with_info" self.update_course(self.course, self.user.username) @ddt.data('verified', 'professional') @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False}) def test_display_verified_certificate(self, enrollment_mode): self._create_certificate(enrollment_mode) self._check_can_download_certificate() @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False}) def test_display_verified_certificate_no_id(self): """ Confirm that if we get a certificate with a no-id-professional mode we still can download our certificate """ self._create_certificate(CourseMode.NO_ID_PROFESSIONAL_MODE) 
self._check_can_download_certificate_no_id() @ddt.data('verified', 'honor') @override_settings(CERT_NAME_SHORT='Test_Certificate') @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True}) def test_display_download_certificate_button(self, enrollment_mode): """ Tests if CERTIFICATES_HTML_VIEW is True and course has enabled web certificates via cert_html_view_enabled setting and no active certificate configuration available then any of the Download certificate button should not be visible. """ self.course.cert_html_view_enabled = True self.course.save() self.store.update_item(self.course, self.user.id) self._create_certificate(enrollment_mode) self._check_can_not_download_certificate() @ddt.data('verified') @override_settings(CERT_NAME_SHORT='Test_Certificate') @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True}) def test_linked_student_to_web_view_credential(self, enrollment_mode): test_url = get_certificate_url( user_id=self.user.id, course_id=unicode(self.course.id) ) self._create_certificate(enrollment_mode) certificates = [ { 'id': 0, 'name': 'Test Name', 'description': 'Test Description', 'is_active': True, 'signatories': [], 'version': 1 } ] self.course.certificates = {'certificates': certificates} self.course.cert_html_view_enabled = True self.course.save() # pylint: disable=no-member self.store.update_item(self.course, self.user.id) response = self.client.get(reverse('dashboard')) self.assertContains(response, u'View Test_Certificate') self.assertContains(response, test_url) def _create_certificate(self, enrollment_mode): """Simulate that the user has a generated certificate. 
""" CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id, mode=enrollment_mode) GeneratedCertificateFactory( user=self.user, course_id=self.course.id, mode=enrollment_mode, download_url=self.DOWNLOAD_URL, status="downloadable", grade=0.98, ) def _check_can_download_certificate(self): response = self.client.get(reverse('dashboard')) self.assertContains(response, u'Download Your ID Verified') self.assertContains(response, self.DOWNLOAD_URL) def _check_can_download_certificate_no_id(self): """ Inspects the dashboard to see if a certificate for a non verified course enrollment is present """ response = self.client.get(reverse('dashboard')) self.assertContains(response, u'Download') self.assertContains(response, u'(PDF)') self.assertContains(response, self.DOWNLOAD_URL) def _check_can_not_download_certificate(self): """ Make sure response does not have any of the download certificate buttons """ response = self.client.get(reverse('dashboard')) self.assertNotContains(response, u'View Test_Certificate') self.assertNotContains(response, u'Download Your Test_Certificate (PDF)') self.assertNotContains(response, u'Download Test_Certificate (PDF)') self.assertNotContains(response, self.DOWNLOAD_URL)
agpl-3.0
gertingold/scipy
scipy/io/arff/arffread.py
4
26521
# Last Change: Mon Aug 20 08:00 PM 2007 J from __future__ import division, print_function, absolute_import import re import datetime from collections import OrderedDict import numpy as np from scipy._lib.six import next import csv import ctypes """A module to read arff files.""" __all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError'] # An Arff file is basically two parts: # - header # - data # # A header has each of its components starting by @META where META is one of # the keyword (attribute of relation, for now). # TODO: # - both integer and reals are treated as numeric -> the integer info # is lost! # - Replace ValueError by ParseError or something # We know can handle the following: # - numeric and nominal attributes # - missing values for numeric attributes r_meta = re.compile(r'^\s*@') # Match a comment r_comment = re.compile(r'^%') # Match an empty line r_empty = re.compile(r'^\s+$') # Match a header line, that is a line which starts by @ + a word r_headerline = re.compile(r'^\s*@\S*') r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]') r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)') r_attribute = re.compile(r'^\s*@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)') r_nominal = re.compile('{(.+)}') r_date = re.compile(r"[Dd][Aa][Tt][Ee]\s+[\"']?(.+?)[\"']?$") # To get attributes name enclosed with '' r_comattrval = re.compile(r"'(..+)'\s+(..+$)") # To get normal attributes r_wcomattrval = re.compile(r"(\S+)\s+(..+$)") # ------------------------ # Module defined exception # ------------------------ class ArffError(IOError): pass class ParseArffError(ArffError): pass # ---------- # Attributes # ---------- class Attribute(object): type_name = None def __init__(self, name): self.name = name self.range = None self.dtype = np.object_ @classmethod def parse_attribute(cls, name, attr_string): """ Parse the attribute line if it knows how. Returns the parsed attribute, or None. 
""" return None def parse_data(self, data_str): """ Parse a value of this type. """ return None def __str__(self): """ Parse a value of this type. """ return self.name + ',' + self.type_name class NominalAttribute(Attribute): type_name = 'nominal' def __init__(self, name, values): super().__init__(name) self.values = values self.range = values self.dtype = (np.string_, max(len(i) for i in values)) @staticmethod def _get_nom_val(atrv): """Given a string containing a nominal type, returns a tuple of the possible values. A nominal type is defined as something framed between braces ({}). Parameters ---------- atrv : str Nominal type definition Returns ------- poss_vals : tuple possible values Examples -------- >>> get_nom_val("{floup, bouga, fl, ratata}") ('floup', 'bouga', 'fl', 'ratata') """ m = r_nominal.match(atrv) if m: attrs, _ = split_data_line(m.group(1)) return tuple(attrs) else: raise ValueError("This does not look like a nominal string") @classmethod def parse_attribute(cls, name, attr_string): """ Parse the attribute line if it knows how. Returns the parsed attribute, or None. For nominal attributes, the attribute string would be like '{<attr_1>, <attr2>, <attr_3>}'. """ if attr_string[0] == '{': values = cls._get_nom_val(attr_string) return cls(name, values) else: return None def parse_data(self, data_str): """ Parse a value of this type. """ if data_str in self.values: return data_str elif data_str == '?': return data_str else: raise ValueError("%s value not in %s" % (str(data_str), str(self.values))) def __str__(self): msg = self.name + ",{" for i in range(len(self.values)-1): msg += self.values[i] + "," msg += self.values[-1] msg += "}" return msg class NumericAttribute(Attribute): def __init__(self, name): super().__init__(name) self.type_name = 'numeric' self.dtype = np.float_ @classmethod def parse_attribute(cls, name, attr_string): """ Parse the attribute line if it knows how. Returns the parsed attribute, or None. 
For numeric attributes, the attribute string would be like 'numeric' or 'int' or 'real'. """ attr_string = attr_string.lower().strip() if(attr_string[:len('numeric')] == 'numeric' or attr_string[:len('int')] == 'int' or attr_string[:len('real')] == 'real'): return cls(name) else: return None def parse_data(self, data_str): """ Parse a value of this type. Parameters ---------- data_str : str string to convert Returns ------- f : float where float can be nan Examples -------- >>> atr = NumericAttribute('atr') >>> atr.parse_data('1') 1.0 >>> atr.parse_data('1\\n') 1.0 >>> atr.parse_data('?\\n') nan """ if '?' in data_str: return np.nan else: return float(data_str) def _basic_stats(self, data): nbfac = data.size * 1. / (data.size - 1) return (np.nanmin(data), np.nanmax(data), np.mean(data), np.std(data) * nbfac) class StringAttribute(Attribute): def __init__(self, name): super().__init__(name) self.type_name = 'string' @classmethod def parse_attribute(cls, name, attr_string): """ Parse the attribute line if it knows how. Returns the parsed attribute, or None. For string attributes, the attribute string would be like 'string'. 
""" attr_string = attr_string.lower().strip() if attr_string[:len('string')] == 'string': return cls(name) else: return None class DateAttribute(Attribute): def __init__(self, name, date_format, datetime_unit): super().__init__(name) self.date_format = date_format self.datetime_unit = datetime_unit self.type_name = 'date' self.range = date_format self.dtype = np.datetime64(0, self.datetime_unit) @staticmethod def _get_date_format(atrv): m = r_date.match(atrv) if m: pattern = m.group(1).strip() # convert time pattern from Java's SimpleDateFormat to C's format datetime_unit = None if "yyyy" in pattern: pattern = pattern.replace("yyyy", "%Y") datetime_unit = "Y" elif "yy": pattern = pattern.replace("yy", "%y") datetime_unit = "Y" if "MM" in pattern: pattern = pattern.replace("MM", "%m") datetime_unit = "M" if "dd" in pattern: pattern = pattern.replace("dd", "%d") datetime_unit = "D" if "HH" in pattern: pattern = pattern.replace("HH", "%H") datetime_unit = "h" if "mm" in pattern: pattern = pattern.replace("mm", "%M") datetime_unit = "m" if "ss" in pattern: pattern = pattern.replace("ss", "%S") datetime_unit = "s" if "z" in pattern or "Z" in pattern: raise ValueError("Date type attributes with time zone not " "supported, yet") if datetime_unit is None: raise ValueError("Invalid or unsupported date format") return pattern, datetime_unit else: raise ValueError("Invalid or no date format") @classmethod def parse_attribute(cls, name, attr_string): """ Parse the attribute line if it knows how. Returns the parsed attribute, or None. For date attributes, the attribute string would be like 'date <format>'. """ attr_string_lower = attr_string.lower().strip() if attr_string_lower[:len('date')] == 'date': date_format, datetime_unit = cls._get_date_format(attr_string) return cls(name, date_format, datetime_unit) else: return None def parse_data(self, data_str): """ Parse a value of this type. 
""" date_str = data_str.strip().strip("'").strip('"') if date_str == '?': return np.datetime64('NaT', self.datetime_unit) else: dt = datetime.datetime.strptime(date_str, self.date_format) return np.datetime64(dt).astype( "datetime64[%s]" % self.datetime_unit) def __str__(self): return super(DateAttribute, self).__str__() + ',' + self.date_format class RelationalAttribute(Attribute): def __init__(self, name): super().__init__(name) self.type_name = 'relational' self.dtype = np.object_ self.attributes = [] self.dialect = None @classmethod def parse_attribute(cls, name, attr_string): """ Parse the attribute line if it knows how. Returns the parsed attribute, or None. For date attributes, the attribute string would be like 'date <format>'. """ attr_string_lower = attr_string.lower().strip() if attr_string_lower[:len('relational')] == 'relational': return cls(name) else: return None def parse_data(self, data_str): # Copy-pasted elems = list(range(len(self.attributes))) escaped_string = data_str.encode().decode("unicode-escape") row_tuples = [] for raw in escaped_string.split("\n"): row, self.dialect = split_data_line(raw, self.dialect) row_tuples.append(tuple( [self.attributes[i].parse_data(row[i]) for i in elems])) return np.array(row_tuples, [(a.name, a.dtype) for a in self.attributes]) def __str__(self): return (super(RelationalAttribute, self).__str__() + '\n\t' + '\n\t'.join(str(a) for a in self.attributes)) # ----------------- # Various utilities # ----------------- def to_attribute(name, attr_string): attr_classes = (NominalAttribute, NumericAttribute, DateAttribute, StringAttribute, RelationalAttribute) for cls in attr_classes: attr = cls.parse_attribute(name, attr_string) if attr is not None: return attr raise ParseArffError("unknown attribute %s" % attr_string) def csv_sniffer_has_bug_last_field(): """ Checks if the bug https://bugs.python.org/issue30157 is unpatched. """ # We only compute this once. 
has_bug = getattr(csv_sniffer_has_bug_last_field, "has_bug", None) if has_bug is None: dialect = csv.Sniffer().sniff("3, 'a'") csv_sniffer_has_bug_last_field.has_bug = dialect.quotechar != "'" has_bug = csv_sniffer_has_bug_last_field.has_bug return has_bug def workaround_csv_sniffer_bug_last_field(sniff_line, dialect, delimiters): """ Workaround for the bug https://bugs.python.org/issue30157 if is unpatched. """ if csv_sniffer_has_bug_last_field(): # Reuses code from the csv module right_regex = r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)' for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?", r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # .*?", right_regex, # ,".*?" r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space) regexp = re.compile(restr, re.DOTALL | re.MULTILINE) matches = regexp.findall(sniff_line) if matches: break # If it does not match the expression that was bugged, then this bug does not apply if restr != right_regex: return groupindex = regexp.groupindex # There is only one end of the string assert len(matches) == 1 m = matches[0] n = groupindex['quote'] - 1 quote = m[n] n = groupindex['delim'] - 1 delim = m[n] n = groupindex['space'] - 1 space = bool(m[n]) dq_regexp = re.compile( r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % {'delim': re.escape(delim), 'quote': quote}, re.MULTILINE ) doublequote = bool(dq_regexp.search(sniff_line)) dialect.quotechar = quote if delim in delimiters: dialect.delimiter = delim dialect.doublequote = doublequote dialect.skipinitialspace = space def split_data_line(line, dialect=None): delimiters = ",\t" # This can not be done in a per reader basis, and relational fields # can be HUGE csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2)) # Remove the line end if any if line[-1] == '\n': line = line[:-1] sniff_line = line # Add a 
delimiter if none is present, so that the csv.Sniffer # does not complain for a single-field CSV. if not any(d in line for d in delimiters): sniff_line += "," if dialect is None: dialect = csv.Sniffer().sniff(sniff_line, delimiters=delimiters) workaround_csv_sniffer_bug_last_field(sniff_line=sniff_line, dialect=dialect, delimiters=delimiters) row = next(csv.reader([line], dialect)) return row, dialect # -------------- # Parsing header # -------------- def tokenize_attribute(iterable, attribute): """Parse a raw string in header (eg starts by @attribute). Given a raw string attribute, try to get the name and type of the attribute. Constraints: * The first line must start with @attribute (case insensitive, and space like characters before @attribute are allowed) * Works also if the attribute is spread on multilines. * Works if empty lines or comments are in between Parameters ---------- attribute : str the attribute string. Returns ------- name : str name of the attribute value : str value of the attribute next : str next line to be parsed Examples -------- If attribute is a string defined in python as r"floupi real", will return floupi as name, and real as value. >>> iterable = iter([0] * 10) # dummy iterator >>> tokenize_attribute(iterable, r"@attribute floupi real") ('floupi', 'real', 0) If attribute is r"'floupi 2' real", will return 'floupi 2' as name, and real as value. >>> tokenize_attribute(iterable, r" @attribute 'floupi 2' real ") ('floupi 2', 'real', 0) """ sattr = attribute.strip() mattr = r_attribute.match(sattr) if mattr: # atrv is everything after @attribute atrv = mattr.group(1) if r_comattrval.match(atrv): name, type = tokenize_single_comma(atrv) next_item = next(iterable) elif r_wcomattrval.match(atrv): name, type = tokenize_single_wcomma(atrv) next_item = next(iterable) else: # Not sure we should support this, as it does not seem supported by # weka. 
raise ValueError("multi line not supported yet") else: raise ValueError("First line unparsable: %s" % sattr) attribute = to_attribute(name, type) if type.lower() == 'relational': next_item = read_relational_attribute(iterable, attribute, next_item) # raise ValueError("relational attributes not supported yet") return attribute, next_item def tokenize_single_comma(val): # XXX we match twice the same string (here and at the caller level). It is # stupid, but it is easier for now... m = r_comattrval.match(val) if m: try: name = m.group(1).strip() type = m.group(2).strip() except IndexError: raise ValueError("Error while tokenizing attribute") else: raise ValueError("Error while tokenizing single %s" % val) return name, type def tokenize_single_wcomma(val): # XXX we match twice the same string (here and at the caller level). It is # stupid, but it is easier for now... m = r_wcomattrval.match(val) if m: try: name = m.group(1).strip() type = m.group(2).strip() except IndexError: raise ValueError("Error while tokenizing attribute") else: raise ValueError("Error while tokenizing single %s" % val) return name, type def read_relational_attribute(ofile, relational_attribute, i): """Read the nested attributes of a relational attribute""" r_end_relational = re.compile(r'^@[Ee][Nn][Dd]\s*' + relational_attribute.name + r'\s*$') while not r_end_relational.match(i): m = r_headerline.match(i) if m: isattr = r_attribute.match(i) if isattr: attr, i = tokenize_attribute(ofile, i) relational_attribute.attributes.append(attr) else: raise ValueError("Error parsing line %s" % i) else: i = next(ofile) i = next(ofile) return i def read_header(ofile): """Read the header of the iterable ofile.""" i = next(ofile) # Pass first comments while r_comment.match(i): i = next(ofile) # Header is everything up to DATA attribute ? 
relation = None attributes = [] while not r_datameta.match(i): m = r_headerline.match(i) if m: isattr = r_attribute.match(i) if isattr: attr, i = tokenize_attribute(ofile, i) attributes.append(attr) else: isrel = r_relation.match(i) if isrel: relation = isrel.group(1) else: raise ValueError("Error parsing line %s" % i) i = next(ofile) else: i = next(ofile) return relation, attributes class MetaData(object): """Small container to keep useful information on a ARFF dataset. Knows about attributes names and types. Examples -------- :: data, meta = loadarff('iris.arff') # This will print the attributes names of the iris.arff dataset for i in meta: print(i) # This works too meta.names() # Getting attribute type types = meta.types() Methods ------- names types Notes ----- Also maintains the list of attributes in order, i.e. doing for i in meta, where meta is an instance of MetaData, will return the different attribute names in the order they were defined. """ def __init__(self, rel, attr): self.name = rel # We need the dictionary to be ordered self._attributes = OrderedDict((a.name, a) for a in attr) def __repr__(self): msg = "" msg += "Dataset: %s\n" % self.name for i in self._attributes: msg += "\t%s's type is %s" % (i, self._attributes[i].type_name) if self._attributes[i].range: msg += ", range is %s" % str(self._attributes[i].range) msg += '\n' return msg def __iter__(self): return iter(self._attributes) def __getitem__(self, key): attr = self._attributes[key] return (attr.type_name, attr.range) def names(self): """Return the list of attribute names. Returns ------- attrnames : list of str The attribute names. """ return list(self._attributes) def types(self): """Return the list of attribute types. Returns ------- attr_types : list of str The attribute types. """ attr_types = [self._attributes[name].type_name for name in self._attributes] return attr_types def loadarff(f): """ Read an arff file. 
The data is returned as a record array, which can be accessed much like a dictionary of numpy arrays. For example, if one of the attributes is called 'pressure', then its first 10 data points can be accessed from the ``data`` record array like so: ``data['pressure'][0:10]`` Parameters ---------- f : file-like or str File-like object to read from, or filename to open. Returns ------- data : record array The data of the arff file, accessible by attribute names. meta : `MetaData` Contains information about the arff file such as name and type of attributes, the relation (name of the dataset), etc... Raises ------ ParseArffError This is raised if the given file is not ARFF-formatted. NotImplementedError The ARFF file has an attribute which is not supported yet. Notes ----- This function should be able to read most arff files. Not implemented functionality include: * date type attributes * string type attributes It can read files with numeric and nominal attributes. It cannot read files with sparse data ({} in the file). However, this function can read files with missing data (? in the file), representing the data points as NaNs. Examples -------- >>> from scipy.io import arff >>> from io import StringIO >>> content = \"\"\" ... @relation foo ... @attribute width numeric ... @attribute height numeric ... @attribute color {red,green,blue,yellow,black} ... @data ... 5.0,3.25,blue ... 4.5,3.75,green ... 3.0,4.00,red ... 
\"\"\" >>> f = StringIO(content) >>> data, meta = arff.loadarff(f) >>> data array([(5.0, 3.25, 'blue'), (4.5, 3.75, 'green'), (3.0, 4.0, 'red')], dtype=[('width', '<f8'), ('height', '<f8'), ('color', '|S6')]) >>> meta Dataset: foo \twidth's type is numeric \theight's type is numeric \tcolor's type is nominal, range is ('red', 'green', 'blue', 'yellow', 'black') """ if hasattr(f, 'read'): ofile = f else: ofile = open(f, 'rt') try: return _loadarff(ofile) finally: if ofile is not f: # only close what we opened ofile.close() def _loadarff(ofile): # Parse the header file try: rel, attr = read_header(ofile) except ValueError as e: msg = "Error while parsing header, error was: " + str(e) raise ParseArffError(msg) # Check whether we have a string attribute (not supported yet) hasstr = False for a in attr: if isinstance(a, StringAttribute): hasstr = True meta = MetaData(rel, attr) # XXX The following code is not great # Build the type descriptor descr and the list of convertors to convert # each attribute to the suitable type (which should match the one in # descr). # This can be used once we want to support integer as integer values and # not as numeric anymore (using masked arrays ?). if hasstr: # How to support string efficiently ? Ideally, we should know the max # size of the string before allocating the numpy array. raise NotImplementedError("String attributes not supported yet, sorry") ni = len(attr) def generator(row_iter, delim=','): # TODO: this is where we are spending times (~80%). I think things # could be made more efficiently: # - We could for example "compile" the function, because some values # do not change here. # - The function to convert a line to dtyped values could also be # generated on the fly from a string and be executed instead of # looping. # - The regex are overkill: for comments, checking that a line starts # by % should be enough and faster, and for empty lines, same thing # --> this does not seem to change anything. 
# 'compiling' the range since it does not change # Note, I have already tried zipping the converters and # row elements and got slightly worse performance. elems = list(range(ni)) dialect = None for raw in row_iter: # We do not abstract skipping comments and empty lines for # performance reasons. if r_comment.match(raw) or r_empty.match(raw): continue row, dialect = split_data_line(raw, dialect) yield tuple([attr[i].parse_data(row[i]) for i in elems]) a = list(generator(ofile)) # No error should happen here: it is a bug otherwise data = np.array(a, [(a.name, a.dtype) for a in attr]) return data, meta # ---- # Misc # ---- def basic_stats(data): nbfac = data.size * 1. / (data.size - 1) return np.nanmin(data), np.nanmax(data), np.mean(data), np.std(data) * nbfac def print_attribute(name, tp, data): type = tp.type_name if type == 'numeric' or type == 'real' or type == 'integer': min, max, mean, std = basic_stats(data) print("%s,%s,%f,%f,%f,%f" % (name, type, min, max, mean, std)) else: print(str(tp)) def test_weka(filename): data, meta = loadarff(filename) print(len(data.dtype)) print(data.size) for i in meta: print_attribute(i, meta[i], data[i]) # make sure nose does not find this as a test test_weka.__test__ = False if __name__ == '__main__': import sys filename = sys.argv[1] test_weka(filename)
bsd-3-clause
attente/snapcraft
snapcraft/tests/test_fixture_setup.py
12
2338
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- # # Copyright (C) 2016 Canonical Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import http.client import http.server import json import urllib.parse from snapcraft import tests from snapcraft.tests import fixture_setup class TestFakeServer(http.server.HTTPServer): def __init__(self, server_address): super().__init__( server_address, TestFakeRequestHandler) class TestFakeRequestHandler(http.server.BaseHTTPRequestHandler): def do_GET(self): self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() self.wfile.write(json.dumps('{}').encode()) def log_message(*args): # Do not print anything during the tests. 
pass class FakeServerRunningTestCase(tests.TestCase): def setUp(self): super().setUp() self.fake_server_fixture = fixture_setup._FakeServerRunning() self.fake_server_fixture.fake_server = TestFakeServer def start_fake_server(self): self.useFixture(self.fake_server_fixture) self.netloc = urllib.parse.urlparse( self.fake_server_fixture.url).netloc def do_request(self, method, path): connection = http.client.HTTPConnection(self.netloc) self.addCleanup(connection.close) connection.request(method, path) response = connection.getresponse() return response.status def assert_server_not_running(self): self.assertRaises( Exception, self.do_request, 'GET', '/') def test_server_must_start_and_stop(self): self.addCleanup(self.assert_server_not_running) self.start_fake_server() status = self.do_request('GET', '/') self.assertEqual(status, 200)
gpl-3.0
Zhaoyanzhang/-myflasky
venv/lib/python2.7/site-packages/sqlalchemy/testing/fixtures.py
32
10721
# testing/fixtures.py # Copyright (C) 2005-2017 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from . import config from . import assertions, schema from .util import adict from .. import util from .engines import drop_all_tables from .entities import BasicEntity, ComparableEntity import sys import sqlalchemy as sa from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta # whether or not we use unittest changes things dramatically, # as far as how py.test collection works. class TestBase(object): # A sequence of database names to always run, regardless of the # constraints below. __whitelist__ = () # A sequence of requirement names matching testing.requires decorators __requires__ = () # A sequence of dialect names to exclude from the test class. __unsupported_on__ = () # If present, test class is only runnable for the *single* specified # dialect. If you need multiple, use __unsupported_on__ and invert. __only_on__ = None # A sequence of no-arg callables. If any are True, the entire testcase is # skipped. 
__skip_if__ = None def assert_(self, val, msg=None): assert val, msg # apparently a handful of tests are doing this....OK def setup(self): if hasattr(self, "setUp"): self.setUp() def teardown(self): if hasattr(self, "tearDown"): self.tearDown() class TablesTest(TestBase): # 'once', None run_setup_bind = 'once' # 'once', 'each', None run_define_tables = 'once' # 'once', 'each', None run_create_tables = 'once' # 'once', 'each', None run_inserts = 'each' # 'each', None run_deletes = 'each' # 'once', None run_dispose_bind = None bind = None metadata = None tables = None other = None @classmethod def setup_class(cls): cls._init_class() cls._setup_once_tables() cls._setup_once_inserts() @classmethod def _init_class(cls): if cls.run_define_tables == 'each': if cls.run_create_tables == 'once': cls.run_create_tables = 'each' assert cls.run_inserts in ('each', None) cls.other = adict() cls.tables = adict() cls.bind = cls.setup_bind() cls.metadata = sa.MetaData() cls.metadata.bind = cls.bind @classmethod def _setup_once_inserts(cls): if cls.run_inserts == 'once': cls._load_fixtures() cls.insert_data() @classmethod def _setup_once_tables(cls): if cls.run_define_tables == 'once': cls.define_tables(cls.metadata) if cls.run_create_tables == 'once': cls.metadata.create_all(cls.bind) cls.tables.update(cls.metadata.tables) def _setup_each_tables(self): if self.run_define_tables == 'each': self.tables.clear() if self.run_create_tables == 'each': drop_all_tables(self.metadata, self.bind) self.metadata.clear() self.define_tables(self.metadata) if self.run_create_tables == 'each': self.metadata.create_all(self.bind) self.tables.update(self.metadata.tables) elif self.run_create_tables == 'each': drop_all_tables(self.metadata, self.bind) self.metadata.create_all(self.bind) def _setup_each_inserts(self): if self.run_inserts == 'each': self._load_fixtures() self.insert_data() def _teardown_each_tables(self): # no need to run deletes if tables are recreated on setup if self.run_define_tables 
!= 'each' and self.run_deletes == 'each': with self.bind.connect() as conn: for table in reversed(self.metadata.sorted_tables): try: conn.execute(table.delete()) except sa.exc.DBAPIError as ex: util.print_( ("Error emptying table %s: %r" % (table, ex)), file=sys.stderr) def setup(self): self._setup_each_tables() self._setup_each_inserts() def teardown(self): self._teardown_each_tables() @classmethod def _teardown_once_metadata_bind(cls): if cls.run_create_tables: drop_all_tables(cls.metadata, cls.bind) if cls.run_dispose_bind == 'once': cls.dispose_bind(cls.bind) cls.metadata.bind = None if cls.run_setup_bind is not None: cls.bind = None @classmethod def teardown_class(cls): cls._teardown_once_metadata_bind() @classmethod def setup_bind(cls): return config.db @classmethod def dispose_bind(cls, bind): if hasattr(bind, 'dispose'): bind.dispose() elif hasattr(bind, 'close'): bind.close() @classmethod def define_tables(cls, metadata): pass @classmethod def fixtures(cls): return {} @classmethod def insert_data(cls): pass def sql_count_(self, count, fn): self.assert_sql_count(self.bind, fn, count) def sql_eq_(self, callable_, statements): self.assert_sql(self.bind, callable_, statements) @classmethod def _load_fixtures(cls): """Insert rows as represented by the fixtures() method.""" headers, rows = {}, {} for table, data in cls.fixtures().items(): if len(data) < 2: continue if isinstance(table, util.string_types): table = cls.tables[table] headers[table] = data[0] rows[table] = data[1:] for table in cls.metadata.sorted_tables: if table not in headers: continue cls.bind.execute( table.insert(), [dict(zip(headers[table], column_values)) for column_values in rows[table]]) from sqlalchemy import event class RemovesEvents(object): @util.memoized_property def _event_fns(self): return set() def event_listen(self, target, name, fn): self._event_fns.add((target, name, fn)) event.listen(target, name, fn) def teardown(self): for key in self._event_fns: event.remove(*key) super_ = 
super(RemovesEvents, self) if hasattr(super_, "teardown"): super_.teardown() class _ORMTest(object): @classmethod def teardown_class(cls): sa.orm.session.Session.close_all() sa.orm.clear_mappers() class ORMTest(_ORMTest, TestBase): pass class MappedTest(_ORMTest, TablesTest, assertions.AssertsExecutionResults): # 'once', 'each', None run_setup_classes = 'once' # 'once', 'each', None run_setup_mappers = 'each' classes = None @classmethod def setup_class(cls): cls._init_class() if cls.classes is None: cls.classes = adict() cls._setup_once_tables() cls._setup_once_classes() cls._setup_once_mappers() cls._setup_once_inserts() @classmethod def teardown_class(cls): cls._teardown_once_class() cls._teardown_once_metadata_bind() def setup(self): self._setup_each_tables() self._setup_each_classes() self._setup_each_mappers() self._setup_each_inserts() def teardown(self): sa.orm.session.Session.close_all() self._teardown_each_mappers() self._teardown_each_classes() self._teardown_each_tables() @classmethod def _teardown_once_class(cls): cls.classes.clear() _ORMTest.teardown_class() @classmethod def _setup_once_classes(cls): if cls.run_setup_classes == 'once': cls._with_register_classes(cls.setup_classes) @classmethod def _setup_once_mappers(cls): if cls.run_setup_mappers == 'once': cls._with_register_classes(cls.setup_mappers) def _setup_each_mappers(self): if self.run_setup_mappers == 'each': self._with_register_classes(self.setup_mappers) def _setup_each_classes(self): if self.run_setup_classes == 'each': self._with_register_classes(self.setup_classes) @classmethod def _with_register_classes(cls, fn): """Run a setup method, framing the operation with a Base class that will catch new subclasses to be established within the "classes" registry. 
""" cls_registry = cls.classes class FindFixture(type): def __init__(cls, classname, bases, dict_): cls_registry[classname] = cls return type.__init__(cls, classname, bases, dict_) class _Base(util.with_metaclass(FindFixture, object)): pass class Basic(BasicEntity, _Base): pass class Comparable(ComparableEntity, _Base): pass cls.Basic = Basic cls.Comparable = Comparable fn() def _teardown_each_mappers(self): # some tests create mappers in the test bodies # and will define setup_mappers as None - # clear mappers in any case if self.run_setup_mappers != 'once': sa.orm.clear_mappers() def _teardown_each_classes(self): if self.run_setup_classes != 'once': self.classes.clear() @classmethod def setup_classes(cls): pass @classmethod def setup_mappers(cls): pass class DeclarativeMappedTest(MappedTest): run_setup_classes = 'once' run_setup_mappers = 'once' @classmethod def _setup_once_tables(cls): pass @classmethod def _with_register_classes(cls, fn): cls_registry = cls.classes class FindFixtureDeclarative(DeclarativeMeta): def __init__(cls, classname, bases, dict_): cls_registry[classname] = cls return DeclarativeMeta.__init__( cls, classname, bases, dict_) class DeclarativeBasic(object): __table_cls__ = schema.Table _DeclBase = declarative_base(metadata=cls.metadata, metaclass=FindFixtureDeclarative, cls=DeclarativeBasic) cls.DeclarativeBasic = _DeclBase fn() if cls.metadata.tables and cls.run_create_tables: cls.metadata.create_all(config.db)
mit
nikste/tensorflow
tensorflow/examples/tutorials/mnist/mnist_softmax_xla.py
94
3684
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Simple MNIST classifier example with JIT XLA and timelines. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import sys import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data from tensorflow.python.client import timeline FLAGS = None def main(_): # Import data mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True) # Create the model x = tf.placeholder(tf.float32, [None, 784]) w = tf.Variable(tf.zeros([784, 10])) b = tf.Variable(tf.zeros([10])) y = tf.matmul(x, w) + b # Define loss and optimizer y_ = tf.placeholder(tf.float32, [None, 10]) # The raw formulation of cross-entropy, # # tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)), # reduction_indices=[1])) # # can be numerically unstable. # # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw # outputs of 'y', and then average across the batch. cross_entropy = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)) train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) config = tf.ConfigProto() jit_level = 0 if FLAGS.xla: # Turns on XLA JIT compilation. 
jit_level = tf.OptimizerOptions.ON_1 config.graph_options.optimizer_options.global_jit_level = jit_level run_metadata = tf.RunMetadata() sess = tf.Session(config=config) tf.global_variables_initializer().run(session=sess) # Train train_loops = 1000 for i in range(train_loops): batch_xs, batch_ys = mnist.train.next_batch(100) # Create a timeline for the last loop and export to json to view with # chrome://tracing/. if i == train_loops - 1: sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys}, options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE), run_metadata=run_metadata) trace = timeline.Timeline(step_stats=run_metadata.step_stats) with open('timeline.ctf.json', 'w') as trace_file: trace_file.write(trace.generate_chrome_trace_format()) else: sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys}) # Test trained model correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})) sess.close() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data', help='Directory for storing input data') parser.add_argument( '--xla', type=bool, default=True, help='Turn xla via JIT on') FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
apache-2.0
perkinslr/pypyjs
addedLibraries/twisted/protocols/memcache.py
42
23218
# -*- test-case-name: twisted.test.test_memcache -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Memcache client protocol. Memcached is a caching server, storing data in the form of pairs key/value, and memcache is the protocol to talk with it. To connect to a server, create a factory for L{MemCacheProtocol}:: from twisted.internet import reactor, protocol from twisted.protocols.memcache import MemCacheProtocol, DEFAULT_PORT d = protocol.ClientCreator(reactor, MemCacheProtocol ).connectTCP("localhost", DEFAULT_PORT) def doSomething(proto): # Here you call the memcache operations return proto.set("mykey", "a lot of data") d.addCallback(doSomething) reactor.run() All the operations of the memcache protocol are present, but L{MemCacheProtocol.set} and L{MemCacheProtocol.get} are the more important. See U{http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt} for more information about the protocol. """ from collections import deque from twisted.protocols.basic import LineReceiver from twisted.protocols.policies import TimeoutMixin from twisted.internet.defer import Deferred, fail, TimeoutError from twisted.python import log DEFAULT_PORT = 11211 class NoSuchCommand(Exception): """ Exception raised when a non existent command is called. """ class ClientError(Exception): """ Error caused by an invalid client call. """ class ServerError(Exception): """ Problem happening on the server. """ class Command(object): """ Wrap a client action into an object, that holds the values used in the protocol. @ivar _deferred: the L{Deferred} object that will be fired when the result arrives. @type _deferred: L{Deferred} @ivar command: name of the command sent to the server. @type command: C{str} """ def __init__(self, command, **kwargs): """ Create a command. @param command: the name of the command. 
@type command: C{str} @param kwargs: this values will be stored as attributes of the object for future use """ self.command = command self._deferred = Deferred() for k, v in kwargs.items(): setattr(self, k, v) def success(self, value): """ Shortcut method to fire the underlying deferred. """ self._deferred.callback(value) def fail(self, error): """ Make the underlying deferred fails. """ self._deferred.errback(error) class MemCacheProtocol(LineReceiver, TimeoutMixin): """ MemCache protocol: connect to a memcached server to store/retrieve values. @ivar persistentTimeOut: the timeout period used to wait for a response. @type persistentTimeOut: C{int} @ivar _current: current list of requests waiting for an answer from the server. @type _current: C{deque} of L{Command} @ivar _lenExpected: amount of data expected in raw mode, when reading for a value. @type _lenExpected: C{int} @ivar _getBuffer: current buffer of data, used to store temporary data when reading in raw mode. @type _getBuffer: C{list} @ivar _bufferLength: the total amount of bytes in C{_getBuffer}. @type _bufferLength: C{int} @ivar _disconnected: indicate if the connectionLost has been called or not. @type _disconnected: C{bool} """ MAX_KEY_LENGTH = 250 _disconnected = False def __init__(self, timeOut=60): """ Create the protocol. @param timeOut: the timeout to wait before detecting that the connection is dead and close it. It's expressed in seconds. @type timeOut: C{int} """ self._current = deque() self._lenExpected = None self._getBuffer = None self._bufferLength = None self.persistentTimeOut = self.timeOut = timeOut def _cancelCommands(self, reason): """ Cancel all the outstanding commands, making them fail with C{reason}. """ while self._current: cmd = self._current.popleft() cmd.fail(reason) def timeoutConnection(self): """ Close the connection in case of timeout. 
""" self._cancelCommands(TimeoutError("Connection timeout")) self.transport.loseConnection() def connectionLost(self, reason): """ Cause any outstanding commands to fail. """ self._disconnected = True self._cancelCommands(reason) LineReceiver.connectionLost(self, reason) def sendLine(self, line): """ Override sendLine to add a timeout to response. """ if not self._current: self.setTimeout(self.persistentTimeOut) LineReceiver.sendLine(self, line) def rawDataReceived(self, data): """ Collect data for a get. """ self.resetTimeout() self._getBuffer.append(data) self._bufferLength += len(data) if self._bufferLength >= self._lenExpected + 2: data = "".join(self._getBuffer) buf = data[:self._lenExpected] rem = data[self._lenExpected + 2:] val = buf self._lenExpected = None self._getBuffer = None self._bufferLength = None cmd = self._current[0] if cmd.multiple: flags, cas = cmd.values[cmd.currentKey] cmd.values[cmd.currentKey] = (flags, cas, val) else: cmd.value = val self.setLineMode(rem) def cmd_STORED(self): """ Manage a success response to a set operation. """ self._current.popleft().success(True) def cmd_NOT_STORED(self): """ Manage a specific 'not stored' response to a set operation: this is not an error, but some condition wasn't met. """ self._current.popleft().success(False) def cmd_END(self): """ This the end token to a get or a stat operation. """ cmd = self._current.popleft() if cmd.command == "get": if cmd.multiple: values = dict([(key, val[::2]) for key, val in cmd.values.iteritems()]) cmd.success(values) else: cmd.success((cmd.flags, cmd.value)) elif cmd.command == "gets": if cmd.multiple: cmd.success(cmd.values) else: cmd.success((cmd.flags, cmd.cas, cmd.value)) elif cmd.command == "stats": cmd.success(cmd.values) def cmd_NOT_FOUND(self): """ Manage error response for incr/decr/delete. """ self._current.popleft().success(False) def cmd_VALUE(self, line): """ Prepare the reading a value after a get. 
""" cmd = self._current[0] if cmd.command == "get": key, flags, length = line.split() cas = "" else: key, flags, length, cas = line.split() self._lenExpected = int(length) self._getBuffer = [] self._bufferLength = 0 if cmd.multiple: if key not in cmd.keys: raise RuntimeError("Unexpected commands answer.") cmd.currentKey = key cmd.values[key] = [int(flags), cas] else: if cmd.key != key: raise RuntimeError("Unexpected commands answer.") cmd.flags = int(flags) cmd.cas = cas self.setRawMode() def cmd_STAT(self, line): """ Reception of one stat line. """ cmd = self._current[0] key, val = line.split(" ", 1) cmd.values[key] = val def cmd_VERSION(self, versionData): """ Read version token. """ self._current.popleft().success(versionData) def cmd_ERROR(self): """ An non-existent command has been sent. """ log.err("Non-existent command sent.") cmd = self._current.popleft() cmd.fail(NoSuchCommand()) def cmd_CLIENT_ERROR(self, errText): """ An invalid input as been sent. """ log.err("Invalid input: %s" % (errText,)) cmd = self._current.popleft() cmd.fail(ClientError(errText)) def cmd_SERVER_ERROR(self, errText): """ An error has happened server-side. """ log.err("Server error: %s" % (errText,)) cmd = self._current.popleft() cmd.fail(ServerError(errText)) def cmd_DELETED(self): """ A delete command has completed successfully. """ self._current.popleft().success(True) def cmd_OK(self): """ The last command has been completed. """ self._current.popleft().success(True) def cmd_EXISTS(self): """ A C{checkAndSet} update has failed. """ self._current.popleft().success(False) def lineReceived(self, line): """ Receive line commands from the server. 
""" self.resetTimeout() token = line.split(" ", 1)[0] # First manage standard commands without space cmd = getattr(self, "cmd_%s" % (token,), None) if cmd is not None: args = line.split(" ", 1)[1:] if args: cmd(args[0]) else: cmd() else: # Then manage commands with space in it line = line.replace(" ", "_") cmd = getattr(self, "cmd_%s" % (line,), None) if cmd is not None: cmd() else: # Increment/Decrement response cmd = self._current.popleft() val = int(line) cmd.success(val) if not self._current: # No pending request, remove timeout self.setTimeout(None) def increment(self, key, val=1): """ Increment the value of C{key} by given value (default to 1). C{key} must be consistent with an int. Return the new value. @param key: the key to modify. @type key: C{str} @param val: the value to increment. @type val: C{int} @return: a deferred with will be called back with the new value associated with the key (after the increment). @rtype: L{Deferred} """ return self._incrdecr("incr", key, val) def decrement(self, key, val=1): """ Decrement the value of C{key} by given value (default to 1). C{key} must be consistent with an int. Return the new value, coerced to 0 if negative. @param key: the key to modify. @type key: C{str} @param val: the value to decrement. @type val: C{int} @return: a deferred with will be called back with the new value associated with the key (after the decrement). @rtype: L{Deferred} """ return self._incrdecr("decr", key, val) def _incrdecr(self, cmd, key, val): """ Internal wrapper for incr/decr. 
""" if self._disconnected: return fail(RuntimeError("not connected")) if not isinstance(key, str): return fail(ClientError( "Invalid type for key: %s, expecting a string" % (type(key),))) if len(key) > self.MAX_KEY_LENGTH: return fail(ClientError("Key too long")) fullcmd = "%s %s %d" % (cmd, key, int(val)) self.sendLine(fullcmd) cmdObj = Command(cmd, key=key) self._current.append(cmdObj) return cmdObj._deferred def replace(self, key, val, flags=0, expireTime=0): """ Replace the given C{key}. It must already exist in the server. @param key: the key to replace. @type key: C{str} @param val: the new value associated with the key. @type val: C{str} @param flags: the flags to store with the key. @type flags: C{int} @param expireTime: if different from 0, the relative time in seconds when the key will be deleted from the store. @type expireTime: C{int} @return: a deferred that will fire with C{True} if the operation has succeeded, and C{False} with the key didn't previously exist. @rtype: L{Deferred} """ return self._set("replace", key, val, flags, expireTime, "") def add(self, key, val, flags=0, expireTime=0): """ Add the given C{key}. It must not exist in the server. @param key: the key to add. @type key: C{str} @param val: the value associated with the key. @type val: C{str} @param flags: the flags to store with the key. @type flags: C{int} @param expireTime: if different from 0, the relative time in seconds when the key will be deleted from the store. @type expireTime: C{int} @return: a deferred that will fire with C{True} if the operation has succeeded, and C{False} with the key already exists. @rtype: L{Deferred} """ return self._set("add", key, val, flags, expireTime, "") def set(self, key, val, flags=0, expireTime=0): """ Set the given C{key}. @param key: the key to set. @type key: C{str} @param val: the value associated with the key. @type val: C{str} @param flags: the flags to store with the key. 
@type flags: C{int} @param expireTime: if different from 0, the relative time in seconds when the key will be deleted from the store. @type expireTime: C{int} @return: a deferred that will fire with C{True} if the operation has succeeded. @rtype: L{Deferred} """ return self._set("set", key, val, flags, expireTime, "") def checkAndSet(self, key, val, cas, flags=0, expireTime=0): """ Change the content of C{key} only if the C{cas} value matches the current one associated with the key. Use this to store a value which hasn't been modified since last time you fetched it. @param key: The key to set. @type key: C{str} @param val: The value associated with the key. @type val: C{str} @param cas: Unique 64-bit value returned by previous call of C{get}. @type cas: C{str} @param flags: The flags to store with the key. @type flags: C{int} @param expireTime: If different from 0, the relative time in seconds when the key will be deleted from the store. @type expireTime: C{int} @return: A deferred that will fire with C{True} if the operation has succeeded, C{False} otherwise. @rtype: L{Deferred} """ return self._set("cas", key, val, flags, expireTime, cas) def _set(self, cmd, key, val, flags, expireTime, cas): """ Internal wrapper for setting values. """ if self._disconnected: return fail(RuntimeError("not connected")) if not isinstance(key, str): return fail(ClientError( "Invalid type for key: %s, expecting a string" % (type(key),))) if len(key) > self.MAX_KEY_LENGTH: return fail(ClientError("Key too long")) if not isinstance(val, str): return fail(ClientError( "Invalid type for value: %s, expecting a string" % (type(val),))) if cas: cas = " " + cas length = len(val) fullcmd = "%s %s %d %d %d%s" % ( cmd, key, flags, expireTime, length, cas) self.sendLine(fullcmd) self.sendLine(val) cmdObj = Command(cmd, key=key, flags=flags, length=length) self._current.append(cmdObj) return cmdObj._deferred def append(self, key, val): """ Append given data to the value of an existing key. 
@param key: The key to modify. @type key: C{str} @param val: The value to append to the current value associated with the key. @type val: C{str} @return: A deferred that will fire with C{True} if the operation has succeeded, C{False} otherwise. @rtype: L{Deferred} """ # Even if flags and expTime values are ignored, we have to pass them return self._set("append", key, val, 0, 0, "") def prepend(self, key, val): """ Prepend given data to the value of an existing key. @param key: The key to modify. @type key: C{str} @param val: The value to prepend to the current value associated with the key. @type val: C{str} @return: A deferred that will fire with C{True} if the operation has succeeded, C{False} otherwise. @rtype: L{Deferred} """ # Even if flags and expTime values are ignored, we have to pass them return self._set("prepend", key, val, 0, 0, "") def get(self, key, withIdentifier=False): """ Get the given C{key}. It doesn't support multiple keys. If C{withIdentifier} is set to C{True}, the command issued is a C{gets}, that will return the current identifier associated with the value. This identifier has to be used when issuing C{checkAndSet} update later, using the corresponding method. @param key: The key to retrieve. @type key: C{str} @param withIdentifier: If set to C{True}, retrieve the current identifier along with the value and the flags. @type withIdentifier: C{bool} @return: A deferred that will fire with the tuple (flags, value) if C{withIdentifier} is C{False}, or (flags, cas identifier, value) if C{True}. If the server indicates there is no value associated with C{key}, the returned value will be C{None} and the returned flags will be C{0}. @rtype: L{Deferred} """ return self._get([key], withIdentifier, False) def getMultiple(self, keys, withIdentifier=False): """ Get the given list of C{keys}. If C{withIdentifier} is set to C{True}, the command issued is a C{gets}, that will return the identifiers associated with each values. 
This identifier has to be used when issuing C{checkAndSet} update later, using the corresponding method. @param keys: The keys to retrieve. @type keys: C{list} of C{str} @param withIdentifier: If set to C{True}, retrieve the identifiers along with the values and the flags. @type withIdentifier: C{bool} @return: A deferred that will fire with a dictionary with the elements of C{keys} as keys and the tuples (flags, value) as values if C{withIdentifier} is C{False}, or (flags, cas identifier, value) if C{True}. If the server indicates there is no value associated with C{key}, the returned values will be C{None} and the returned flags will be C{0}. @rtype: L{Deferred} @since: 9.0 """ return self._get(keys, withIdentifier, True) def _get(self, keys, withIdentifier, multiple): """ Helper method for C{get} and C{getMultiple}. """ if self._disconnected: return fail(RuntimeError("not connected")) for key in keys: if not isinstance(key, str): return fail(ClientError( "Invalid type for key: %s, expecting a string" % (type(key),))) if len(key) > self.MAX_KEY_LENGTH: return fail(ClientError("Key too long")) if withIdentifier: cmd = "gets" else: cmd = "get" fullcmd = "%s %s" % (cmd, " ".join(keys)) self.sendLine(fullcmd) if multiple: values = dict([(key, (0, "", None)) for key in keys]) cmdObj = Command(cmd, keys=keys, values=values, multiple=True) else: cmdObj = Command(cmd, key=keys[0], value=None, flags=0, cas="", multiple=False) self._current.append(cmdObj) return cmdObj._deferred def stats(self, arg=None): """ Get some stats from the server. It will be available as a dict. @param arg: An optional additional string which will be sent along with the I{stats} command. The interpretation of this value by the server is left undefined by the memcache protocol specification. @type arg: L{NoneType} or L{str} @return: a deferred that will fire with a C{dict} of the available statistics. 
@rtype: L{Deferred} """ if arg: cmd = "stats " + arg else: cmd = "stats" if self._disconnected: return fail(RuntimeError("not connected")) self.sendLine(cmd) cmdObj = Command("stats", values={}) self._current.append(cmdObj) return cmdObj._deferred def version(self): """ Get the version of the server. @return: a deferred that will fire with the string value of the version. @rtype: L{Deferred} """ if self._disconnected: return fail(RuntimeError("not connected")) self.sendLine("version") cmdObj = Command("version") self._current.append(cmdObj) return cmdObj._deferred def delete(self, key): """ Delete an existing C{key}. @param key: the key to delete. @type key: C{str} @return: a deferred that will be called back with C{True} if the key was successfully deleted, or C{False} if not. @rtype: L{Deferred} """ if self._disconnected: return fail(RuntimeError("not connected")) if not isinstance(key, str): return fail(ClientError( "Invalid type for key: %s, expecting a string" % (type(key),))) self.sendLine("delete %s" % key) cmdObj = Command("delete", key=key) self._current.append(cmdObj) return cmdObj._deferred def flushAll(self): """ Flush all cached values. @return: a deferred that will be called back with C{True} when the operation has succeeded. @rtype: L{Deferred} """ if self._disconnected: return fail(RuntimeError("not connected")) self.sendLine("flush_all") cmdObj = Command("flush_all") self._current.append(cmdObj) return cmdObj._deferred __all__ = ["MemCacheProtocol", "DEFAULT_PORT", "NoSuchCommand", "ClientError", "ServerError"]
mit
WQuanfeng/wagtail
wagtail/wagtailusers/views/users.py
11
4527
from django.shortcuts import render, redirect, get_object_or_404 from django.contrib.auth import get_user_model from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from django.core.urlresolvers import reverse from django.db.models import Q from django.utils.translation import ugettext as _ from django.views.decorators.vary import vary_on_headers from wagtail.wagtailadmin import messages from wagtail.wagtailadmin.forms import SearchForm from wagtail.wagtailadmin.utils import permission_required, any_permission_required from wagtail.wagtailusers.forms import UserCreationForm, UserEditForm from wagtail.wagtailcore.compat import AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME User = get_user_model() # Typically we would check the permission 'auth.change_user' (and 'auth.add_user' / # 'auth.delete_user') for user management actions, but this may vary according to # the AUTH_USER_MODEL setting add_user_perm = "{0}.add_{1}".format(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME.lower()) change_user_perm = "{0}.change_{1}".format(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME.lower()) delete_user_perm = "{0}.delete_{1}".format(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME.lower()) @any_permission_required(add_user_perm, change_user_perm, delete_user_perm) @vary_on_headers('X-Requested-With') def index(request): q = None p = request.GET.get("p", 1) is_searching = False if 'q' in request.GET: form = SearchForm(request.GET, placeholder=_("Search users")) if form.is_valid(): q = form.cleaned_data['q'] is_searching = True if User.USERNAME_FIELD == 'username': users = User.objects.filter(Q(username__icontains=q) | Q(first_name__icontains=q) | Q(last_name__icontains=q) | Q(email__icontains=q)) else: users = User.objects.filter(Q(first_name__icontains=q) | Q(last_name__icontains=q) | Q(email__icontains=q)) else: form = SearchForm(placeholder=_("Search users")) if not is_searching: users = User.objects users = users.order_by('last_name', 'first_name') if 'ordering' in request.GET: 
ordering = request.GET['ordering'] if ordering in ['name', 'username']: if ordering != 'name': users = users.order_by(ordering) else: ordering = 'name' paginator = Paginator(users, 20) try: users = paginator.page(p) except PageNotAnInteger: users = paginator.page(1) except EmptyPage: users = paginator.page(paginator.num_pages) if request.is_ajax(): return render(request, "wagtailusers/users/results.html", { 'users': users, 'is_searching': is_searching, 'query_string': q, 'ordering': ordering, }) else: return render(request, "wagtailusers/users/index.html", { 'search_form': form, 'users': users, 'is_searching': is_searching, 'ordering': ordering, 'query_string': q, }) @permission_required(add_user_perm) def create(request): if request.POST: form = UserCreationForm(request.POST) if form.is_valid(): user = form.save() messages.success(request, _("User '{0}' created.").format(user), buttons=[ messages.button(reverse('wagtailusers_users:edit', args=(user.id,)), _('Edit')) ]) return redirect('wagtailusers_users:index') else: messages.error(request, _("The user could not be created due to errors.")) else: form = UserCreationForm() return render(request, 'wagtailusers/users/create.html', { 'form': form, }) @permission_required(change_user_perm) def edit(request, user_id): user = get_object_or_404(User, id=user_id) if request.POST: form = UserEditForm(request.POST, instance=user) if form.is_valid(): user = form.save() messages.success(request, _("User '{0}' updated.").format(user), buttons=[ messages.button(reverse('wagtailusers_users:edit', args=(user.id,)), _('Edit')) ]) return redirect('wagtailusers_users:index') else: messages.error(request, _("The user could not be saved due to errors.")) else: form = UserEditForm(instance=user) return render(request, 'wagtailusers/users/edit.html', { 'user': user, 'form': form, })
bsd-3-clause
muxi/grpc
test/cpp/naming/resolver_component_tests_runner.py
4
29310
#!/usr/bin/env python # Copyright 2015 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is auto-generated import argparse import sys import subprocess import tempfile import os import time import signal import platform argp = argparse.ArgumentParser(description='Run c-ares resolver tests') argp.add_argument('--test_bin_path', default=None, type=str, help='Path to gtest test binary to invoke.') argp.add_argument('--dns_server_bin_path', default=None, type=str, help='Path to local DNS server python script.') argp.add_argument('--records_config_path', default=None, type=str, help=('Path to DNS records yaml file that ' 'specifies records for the DNS sever. ')) argp.add_argument('--dns_server_port', default=None, type=int, help=('Port that local DNS server is listening on.')) argp.add_argument('--dns_resolver_bin_path', default=None, type=str, help=('Path to the DNS health check utility.')) argp.add_argument('--tcp_connect_bin_path', default=None, type=str, help=('Path to the TCP health check utility.')) args = argp.parse_args() def test_runner_log(msg): sys.stderr.write('\n%s: %s\n' % (__file__, msg)) def python_args(arg_list): if platform.system() == 'Windows': return [sys.executable] + arg_list return arg_list cur_resolver = os.environ.get('GRPC_DNS_RESOLVER') if cur_resolver and cur_resolver != 'ares': test_runner_log(('WARNING: cur resolver set to %s. 
This set of tests ' 'needs to use GRPC_DNS_RESOLVER=ares.')) test_runner_log('Exit 1 without running tests.') sys.exit(1) os.environ.update({'GRPC_TRACE': 'cares_resolver'}) def wait_until_dns_server_is_up(args, dns_server_subprocess, dns_server_subprocess_output): for i in range(0, 30): test_runner_log('Health check: attempt to connect to DNS server over TCP.') tcp_connect_subprocess = subprocess.Popen(python_args([ args.tcp_connect_bin_path, '--server_host', '127.0.0.1', '--server_port', str(args.dns_server_port), '--timeout', str(1)])) tcp_connect_subprocess.communicate() if tcp_connect_subprocess.returncode == 0: test_runner_log(('Health check: attempt to make an A-record ' 'query to DNS server.')) dns_resolver_subprocess = subprocess.Popen(python_args([ args.dns_resolver_bin_path, '--qname', 'health-check-local-dns-server-is-alive.resolver-tests.grpctestingexp', '--server_host', '127.0.0.1', '--server_port', str(args.dns_server_port)]), stdout=subprocess.PIPE) dns_resolver_stdout, _ = dns_resolver_subprocess.communicate() if dns_resolver_subprocess.returncode == 0: if '123.123.123.123' in dns_resolver_stdout: test_runner_log(('DNS server is up! ' 'Successfully reached it over UDP and TCP.')) return time.sleep(0.1) dns_server_subprocess.kill() dns_server_subprocess.wait() test_runner_log(('Failed to reach DNS server over TCP and/or UDP. 
' 'Exitting without running tests.')) test_runner_log('======= DNS server stdout ' '(merged stdout and stderr) =============') with open(dns_server_subprocess_output, 'r') as l: test_runner_log(l.read()) test_runner_log('======= end DNS server output=========') sys.exit(1) dns_server_subprocess_output = tempfile.mktemp() with open(dns_server_subprocess_output, 'w') as l: dns_server_subprocess = subprocess.Popen(python_args([ args.dns_server_bin_path, '--port', str(args.dns_server_port), '--records_config_path', args.records_config_path]), stdin=subprocess.PIPE, stdout=l, stderr=l) def _quit_on_signal(signum, _frame): test_runner_log('Received signal: %d' % signum) dns_server_subprocess.kill() dns_server_subprocess.wait() sys.exit(1) signal.signal(signal.SIGINT, _quit_on_signal) signal.signal(signal.SIGTERM, _quit_on_signal) wait_until_dns_server_is_up(args, dns_server_subprocess, dns_server_subprocess_output) num_test_failures = 0 test_runner_log('Run test with target: %s' % 'no-srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'no-srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '5.5.5.5:443,False', '--expected_chosen_service_config', '', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:1234,True', '--expected_chosen_service_config', '', 
'--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'srv-ipv4-multi-target.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'srv-ipv4-multi-target.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.5:1234,True;1.2.3.6:1234,True;1.2.3.7:1234,True', '--expected_chosen_service_config', '', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'srv-ipv6-single-target.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'srv-ipv6-single-target.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '[2607:f8b0:400a:801::1001]:1234,True', '--expected_chosen_service_config', '', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'srv-ipv6-multi-target.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 
'srv-ipv6-multi-target.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '[2607:f8b0:400a:801::1002]:1234,True;[2607:f8b0:400a:801::1003]:1234,True;[2607:f8b0:400a:801::1004]:1234,True', '--expected_chosen_service_config', '', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'srv-ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'srv-ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:1234,True', '--expected_chosen_service_config', '{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService"}],"waitForReady":true}]}', '--expected_service_config_error', '', '--expected_lb_policy', 'round_robin', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'ipv4-no-srv-simple-service-config.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'ipv4-no-srv-simple-service-config.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:443,False', '--expected_chosen_service_config', '{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"NoSrvSimpleService"}],"waitForReady":true}]}', '--expected_service_config_error', '', '--expected_lb_policy', 
'round_robin', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'ipv4-no-config-for-cpp.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'ipv4-no-config-for-cpp.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:443,False', '--expected_chosen_service_config', '', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'ipv4-cpp-config-has-zero-percentage.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'ipv4-cpp-config-has-zero-percentage.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:443,False', '--expected_chosen_service_config', '', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'ipv4-second-language-is-cpp.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'ipv4-second-language-is-cpp.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', 
'1.2.3.4:443,False', '--expected_chosen_service_config', '{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"CppService"}],"waitForReady":true}]}', '--expected_service_config_error', '', '--expected_lb_policy', 'round_robin', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'ipv4-config-with-percentages.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'ipv4-config-with-percentages.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:443,False', '--expected_chosen_service_config', '{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"AlwaysPickedService"}],"waitForReady":true}]}', '--expected_service_config_error', '', '--expected_lb_policy', 'round_robin', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:1234,True;1.2.3.4:443,False', '--expected_chosen_service_config', '', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', 
'127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '[2607:f8b0:400a:801::1002]:1234,True;[2607:f8b0:400a:801::1002]:443,False', '--expected_chosen_service_config', '', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'ipv4-config-causing-fallback-to-tcp.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'ipv4-config-causing-fallback-to-tcp.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:443,False', '--expected_chosen_service_config', 
'{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooTwo","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooThree","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooFour","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooFive","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooSix","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooSeven","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooEight","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooNine","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooTen","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooEleven","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooTwelve","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooTwelve","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooTwelve","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooTwelve","service":"SimpleService"}],"waitForReady":true}]}', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'srv-ipv4-single-target-srv-disabled.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'srv-ipv4-single-target-srv-disabled.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '2.3.4.5:443,False', '--expected_chosen_service_config', '', '--expected_service_config_error', '', 
'--expected_lb_policy', '', '--enable_srv_queries', 'False', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'srv-ipv4-multi-target-srv-disabled.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'srv-ipv4-multi-target-srv-disabled.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '9.2.3.5:443,False;9.2.3.6:443,False;9.2.3.7:443,False', '--expected_chosen_service_config', '', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'False', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'srv-ipv6-single-target-srv-disabled.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'srv-ipv6-single-target-srv-disabled.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '[2600::1001]:443,False', '--expected_chosen_service_config', '', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'False', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'srv-ipv6-multi-target-srv-disabled.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 
'srv-ipv6-multi-target-srv-disabled.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '[2600::1002]:443,False;[2600::1003]:443,False;[2600::1004]:443,False', '--expected_chosen_service_config', '', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'False', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'srv-ipv4-simple-service-config-srv-disabled.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'srv-ipv4-simple-service-config-srv-disabled.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '5.5.3.4:443,False', '--expected_chosen_service_config', '{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService"}],"waitForReady":true}]}', '--expected_service_config_error', '', '--expected_lb_policy', 'round_robin', '--enable_srv_queries', 'False', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'srv-ipv4-simple-service-config-txt-disabled.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'srv-ipv4-simple-service-config-txt-disabled.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:1234,True', '--expected_chosen_service_config', '', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'False', '--inject_broken_nameserver_list', 'False', 
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'ipv4-cpp-config-has-zero-percentage-txt-disabled.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'ipv4-cpp-config-has-zero-percentage-txt-disabled.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:443,False', '--expected_chosen_service_config', '', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'False', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'ipv4-second-language-is-cpp-txt-disabled.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'ipv4-second-language-is-cpp-txt-disabled.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:443,False', '--expected_chosen_service_config', '', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'False', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'ipv4-svc_cfg_bad_json.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'ipv4-svc_cfg_bad_json.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:443,False', '--expected_chosen_service_config', '', 
'--expected_service_config_error', 'could not parse', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'ipv4-svc_cfg_bad_client_language.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'ipv4-svc_cfg_bad_client_language.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:443,False', '--expected_chosen_service_config', '', '--expected_service_config_error', 'field:clientLanguage error:should be of type array', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'ipv4-svc_cfg_bad_percentage.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'ipv4-svc_cfg_bad_percentage.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:443,False', '--expected_chosen_service_config', '', '--expected_service_config_error', 'field:percentage error:should be of type number', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'ipv4-svc_cfg_bad_wait_for_ready.resolver-tests-version-4.grpctestingexp.') 
current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'ipv4-svc_cfg_bad_wait_for_ready.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:443,False', '--expected_chosen_service_config', '', '--expected_service_config_error', 'field:waitForReady error:Type should be true/false', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'False', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'no-srv-ipv4-single-target-inject-broken-nameservers.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'no-srv-ipv4-single-target-inject-broken-nameservers.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '5.5.5.5:443,False', '--expected_chosen_service_config', '', '--expected_service_config_error', '', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'True', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('Run test with target: %s' % 'ipv4-config-causing-fallback-to-tcp-inject-broken-nameservers.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, '--target_name', 'ipv4-config-causing-fallback-to-tcp-inject-broken-nameservers.resolver-tests-version-4.grpctestingexp.', '--expected_addrs', '1.2.3.4:443,False', '--expected_chosen_service_config', 
'{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwo","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooThree","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooFour","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooFive","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooSix","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooSeven","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooEight","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooNine","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTen","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooEleven","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]}]}', '--expected_service_config_error', 'Service config parsing error', '--expected_lb_policy', '', '--enable_srv_queries', 'True', '--enable_txt_queries', 'True', '--inject_broken_nameserver_list', 'True', '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) current_test_subprocess.communicate() if current_test_subprocess.returncode != 0: num_test_failures += 1 test_runner_log('now kill DNS server') dns_server_subprocess.kill() dns_server_subprocess.wait() test_runner_log('%d tests failed.' % num_test_failures) sys.exit(num_test_failures)
apache-2.0
miniconfig/home-assistant
homeassistant/components/binary_sensor/isy994.py
28
2155
""" Support for ISY994 binary sensors. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/binary_sensor.isy994/ """ import logging from typing import Callable # noqa from homeassistant.components.binary_sensor import BinarySensorDevice, DOMAIN import homeassistant.components.isy994 as isy from homeassistant.const import STATE_ON, STATE_OFF from homeassistant.helpers.typing import ConfigType _LOGGER = logging.getLogger(__name__) VALUE_TO_STATE = { False: STATE_OFF, True: STATE_ON, } UOM = ['2', '78'] STATES = [STATE_OFF, STATE_ON, 'true', 'false'] # pylint: disable=unused-argument def setup_platform(hass, config: ConfigType, add_devices: Callable[[list], None], discovery_info=None): """Setup the ISY994 binary sensor platform.""" if isy.ISY is None or not isy.ISY.connected: _LOGGER.error('A connection has not been made to the ISY controller.') return False devices = [] for node in isy.filter_nodes(isy.SENSOR_NODES, units=UOM, states=STATES): devices.append(ISYBinarySensorDevice(node)) for program in isy.PROGRAMS.get(DOMAIN, []): try: status = program[isy.KEY_STATUS] except (KeyError, AssertionError): pass else: devices.append(ISYBinarySensorProgram(program.name, status)) add_devices(devices) class ISYBinarySensorDevice(isy.ISYDevice, BinarySensorDevice): """Representation of an ISY994 binary sensor device.""" def __init__(self, node) -> None: """Initialize the ISY994 binary sensor device.""" isy.ISYDevice.__init__(self, node) @property def is_on(self) -> bool: """Get whether the ISY994 binary sensor device is on.""" return bool(self.value) class ISYBinarySensorProgram(ISYBinarySensorDevice): """Representation of an ISY994 binary sensor program.""" def __init__(self, name, node) -> None: """Initialize the ISY994 binary sensor program.""" ISYBinarySensorDevice.__init__(self, node) self._name = name
mit
devsar/ae-people
manage.py
1
1219
#!/usr/bin/env python
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Install the App Engine helper BEFORE any Django machinery is imported,
# so Django is configured for the App Engine runtime.
from appengine_django import InstallAppengineHelperForDjango
InstallAppengineHelperForDjango()

from django.core.management import execute_manager

try:
    import settings  # Assumed to be in the same directory.
except ImportError:
    import sys
    # Adjacent string literals concatenate to the exact original message.
    message = (
        "Error: Can't find the file 'settings.py' in the directory "
        "containing %r. It appears you've customized things.\n"
        "You'll have to run django-admin.py, passing it your settings "
        "module.\n(If the file settings.py does indeed exist, it's causing "
        "an ImportError somehow.)\n"
    )
    sys.stderr.write(message % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
apache-2.0
thaim/ansible
lib/ansible/modules/network/check_point/cp_publish.py
20
2198
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage Check Point Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = """
---
module: cp_publish
short_description: All the changes done by this user will be seen by all users only after publish is called.
description:
  - All the changes done by this user will be seen by all users only after publish is called.
    All operations are performed over Web Services API.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
  uid:
    description:
      - Session unique identifier. Specify it to publish a different session than the one you currently use.
    type: str
extends_documentation_fragment: checkpoint_commands
"""

EXAMPLES = """
- name: publish
  cp_publish:
"""

RETURN = """
cp_publish:
  description: The checkpoint publish output.
  returned: always.
  type: dict
"""

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_commands, api_command


def main():
    """Run the Check Point 'publish' command and exit with the API result."""
    # Module-specific option: an optional session uid, letting the user
    # publish a session other than the one currently in use.
    argument_spec = dict(
        uid=dict(type='str')
    )
    # Merge in the options shared by all Check Point command modules
    # (connection/session handling).
    argument_spec.update(checkpoint_argument_spec_for_commands)
    module = AnsibleModule(argument_spec=argument_spec)

    command = "publish"

    # Execute the command over the Check Point Web Services API and report
    # whatever the API returned back to Ansible.
    result = api_command(module, command)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
mit
Xperia-Nicki/android_kernel_sony_nicki
scripts/rt-tester/rt-tester.py
11005
5307
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# NOTE(review): this script is Python 2 only ("print" statements and
# "except Exception, ex" syntax).
#
import os
import sys
import getopt
import shutil
import string

# Globals
quiet = 0        # -q: suppress progress output
test = 0         # -t: syntax check only, never touch sysfs
comments = 0     # -c: echo comment lines after the first command

# sysfs interface of the in-kernel rt-mutex tester:
# <sysfsprefix><tid><statusfile> is read, <sysfsprefix><tid><commandfile>
# is written.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"

# Command opcodes written to the command file as "<opcode>:<data>"
cmd_opcodes = {
    "schedother"    : "1",
    "schedfifo"     : "2",
    "lock"          : "3",
    "locknowait"    : "4",
    "lockint"       : "5",
    "lockintnowait" : "6",
    "lockcont"      : "7",
    "unlock"        : "8",
    "signal"        : "11",
    "resetevent"    : "98",
    "reset"         : "99",
    }

# Test opcodes: [status-field letter, comparison operator, fixed argument].
# A fixed argument of None means the argument is taken from the test line.
test_opcodes = {
    "prioeq"        : ["P" , "eq" , None],
    "priolt"        : ["P" , "lt" , None],
    "priogt"        : ["P" , "gt" , None],
    "nprioeq"       : ["N" , "eq" , None],
    "npriolt"       : ["N" , "lt" , None],
    "npriogt"       : ["N" , "gt" , None],
    "unlocked"      : ["M" , "eq" , 0],
    "trylock"       : ["M" , "eq" , 1],
    "blocked"       : ["M" , "eq" , 2],
    "blockedwake"   : ["M" , "eq" , 3],
    "locked"        : ["M" , "eq" , 4],
    "opcodeeq"      : ["O" , "eq" , None],
    "opcodelt"      : ["O" , "lt" , None],
    "opcodegt"      : ["O" , "gt" , None],
    "eventeq"       : ["E" , "eq" , None],
    "eventlt"       : ["E" , "lt" , None],
    "eventgt"       : ["E" , "gt" , None],
    }

# Print usage information
def usage():
    """Print command line help to stdout."""
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c display comments after first command"
    print " -h help"
    print " -q quiet mode"
    print " -t test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return

# Print progress when not in quiet mode
def progress(str):
    if not quiet:
        print str

# Analyse a status value
def analyse(val, top, arg):
    """Return 1 when status value `val` satisfies test opcode `top`.

    `top` is a [field, operator, fixed-arg] triple from test_opcodes;
    `arg` is the argument column from the test line.
    """

    intval = int(val)

    if top[0] == "M":
        # Mutex status is one decimal digit per mutex; `arg` selects
        # which digit to compare, the expected value is fixed in `top`.
        intval = intval / (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode comparison: the argument may be a symbolic command name.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)

    # progress("%d %s %d" %(intval, top[1], argval))

    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0

# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)

# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)

# Select the input source
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin

linenr = 0

# Read the test patterns.  Each line has the form
#   command:opcode:threadid:data
# where command is "c" (issue command), "t" (test status once) or
# "w" (wait/poll until status matches).
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break

    line = line.strip()
    parts = line.split(":")

    if not parts or len(parts) < 1:
        continue

    if len(parts[0]) == 0:
        continue

    if parts[0].startswith("#"):
        # Comment line: echoed only once a command was seen (-c).
        if comments > 1:
            progress(line)
        continue

    if comments == 1:
        # First real command line: start echoing from here on.
        comments = 2
        progress(line)

    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()

    try:
        # Test or wait for a status value
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]

            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue

            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                # "t" tests once; "w" keeps polling until it matches.
                if query or cmd == "t":
                    break

                progress(" " + status)

            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)

        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()

    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
            sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
gpl-2.0
bqbn/addons-server
conftest.py
2
7160
""" pytest hooks and fixtures used for our unittests. Please note that there should not be any Django/Olympia related imports on module-level, they should instead be added to hooks or fixtures directly. """ import os import uuid import pytest import responses @pytest.fixture(autouse=True) def unpin_db(request): """Unpin the database from master in the current DB. The `multidb` middleware pins the current thread to master for 15 seconds after any POST request, which can lead to unexpected results for tests of DB slave functionality.""" from multidb import pinning request.addfinalizer(pinning.unpin_this_thread) @pytest.fixture(autouse=True, scope='class') def mock_elasticsearch(): """Mock ElasticSearch in tests by default. Tests that do need ES should inherit from ESTestCase, which will stop the mock at setup time.""" from olympia.amo.tests import start_es_mocks, stop_es_mocks start_es_mocks() yield stop_es_mocks() @pytest.fixture(autouse=True) def start_responses_mocking(request): """Enable ``responses`` this enforcing us to explicitly mark tests that require internet usage. """ marker = request.node.get_closest_marker('allow_external_http_requests') if not marker: responses.start() yield try: if not marker: responses.stop() responses.reset() except RuntimeError: # responses patcher was already uninstalled pass @pytest.fixture(autouse=True) def mock_basket(settings): """Mock Basket in tests by default. Tests that do need basket to work should disable `responses` and add a passthrough. 
""" USER_TOKEN = u'13f64f64-1de7-42f6-8c7f-a19e2fae5021' responses.add( responses.GET, settings.BASKET_URL + '/news/lookup-user/', json={'status': 'ok', 'newsletters': [], 'token': USER_TOKEN}) responses.add( responses.POST, settings.BASKET_URL + '/news/subscribe/', json={'status': 'ok', 'token': USER_TOKEN}) responses.add( responses.POST, settings.BASKET_URL + '/news/unsubscribe/{}/'.format(USER_TOKEN), json={'status': 'ok', 'token': USER_TOKEN}) @pytest.fixture(autouse=True) def update_services_db_name_to_follow_test_db_name(db, settings, request): settings.SERVICES_DATABASE['NAME'] = settings.DATABASES['default']['NAME'] def pytest_configure(config): import django # Forcefully call `django.setup`, pytest-django tries to be very lazy # and doesn't call it if it has already been setup. # That is problematic for us since we overwrite our logging config # in settings_test and it can happen that django get's initialized # with the wrong configuration. So let's forcefully re-initialize # to setup the correct logging config since at this point # DJANGO_SETTINGS_MODULE should be `settings_test` every time. 
django.setup() from olympia.amo.tests import prefix_indexes prefix_indexes(config) @pytest.fixture(autouse=True, scope='session') def instrument_jinja(): """Make sure the "templates" list in a response is properly updated, even though we're using Jinja2 and not the default django template engine.""" import jinja2 from django import test old_render = jinja2.Template.render def instrumented_render(self, *args, **kwargs): context = dict(*args, **kwargs) test.signals.template_rendered.send( sender=self, template=self, context=context) return old_render(self, *args, **kwargs) jinja2.Template.render = instrumented_render def default_prefixer(settings): """Make sure each test starts with a default URL prefixer.""" from django import http from olympia import amo request = http.HttpRequest() request.META['SCRIPT_NAME'] = '' prefixer = amo.urlresolvers.Prefixer(request) prefixer.app = settings.DEFAULT_APP prefixer.locale = settings.LANGUAGE_CODE amo.urlresolvers.set_url_prefix(prefixer) @pytest.fixture(autouse=True) def test_pre_setup(request, tmpdir, settings): from django.core.cache import caches from django.utils import translation from olympia import amo, core from olympia.translations.hold import clean_translations from waffle.utils import get_cache as waffle_get_cache from waffle import models as waffle_models # Clear all cache-instances. They'll be re-initialized by Django # This will make sure that our random `KEY_PREFIX` is applied # appropriately. # This is done by Django too whenever `settings` is changed # directly but because we're using the `settings` fixture # here this is not detected correctly. caches._caches.caches = {} # Randomize the cache key prefix to keep # tests isolated from each other. 
prefix = uuid.uuid4().hex settings.CACHES['default']['KEY_PREFIX'] = 'amo:{0}:'.format(prefix) # Reset global django-waffle cache instance to make sure it's properly # using our new key prefix waffle_models.cache = waffle_get_cache() translation.trans_real.deactivate() # Django fails to clear this cache. translation.trans_real._translations = {} translation.trans_real.activate(settings.LANGUAGE_CODE) def _path(*args): path = str(os.path.join(*args)) if not os.path.exists(path): os.makedirs(path) return path settings.STORAGE_ROOT = storage_root = _path(str(tmpdir.mkdir('storage'))) settings.SHARED_STORAGE = shared_storage = _path( storage_root, 'shared_storage') settings.ADDONS_PATH = _path(storage_root, 'files') settings.GUARDED_ADDONS_PATH = _path(storage_root, 'guarded-addons') settings.GIT_FILE_STORAGE_PATH = _path(storage_root, 'git-storage') settings.MLBF_STORAGE_PATH = _path(storage_root, 'mlbf') settings.MEDIA_ROOT = _path(shared_storage, 'uploads') settings.TMP_PATH = _path(shared_storage, 'tmp') # Reset the prefixer and urlconf after updating media root default_prefixer(settings) from django.urls import clear_url_caches, set_urlconf def _clear_urlconf(): clear_url_caches() set_urlconf(None) _clear_urlconf() request.addfinalizer(_clear_urlconf) yield core.set_user(None) clean_translations(None) # Make sure queued translations are removed. # Make sure we revert everything we might have changed to prefixers. amo.urlresolvers.clean_url_prefixes() @pytest.fixture def admin_group(db): """Create the Admins group.""" from olympia.access.models import Group return Group.objects.create(name='Admins', rules='*:*') @pytest.fixture def mozilla_user(admin_group, settings): """Create a "Mozilla User".""" from olympia.access.models import GroupUser from olympia.users.models import UserProfile user = UserProfile.objects.create(pk=settings.TASK_USER_ID, email='admin@mozilla.com', username='admin') user.save() GroupUser.objects.create(user=user, group=admin_group) return user
bsd-3-clause
warm200/flask
flask/blueprints.py
8
16905
# -*- coding: utf-8 -*- """ flask.blueprints ~~~~~~~~~~~~~~~~ Blueprints are the recommended way to implement larger or more pluggable applications in Flask 0.7 and later. :copyright: (c) 2015 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ from functools import update_wrapper from .helpers import _PackageBoundObject, _endpoint_from_view_func class BlueprintSetupState(object): """Temporary holder object for registering a blueprint with the application. An instance of this class is created by the :meth:`~flask.Blueprint.make_setup_state` method and later passed to all register callback functions. """ def __init__(self, blueprint, app, options, first_registration): #: a reference to the current application self.app = app #: a reference to the blueprint that created this setup state. self.blueprint = blueprint #: a dictionary with all options that were passed to the #: :meth:`~flask.Flask.register_blueprint` method. self.options = options #: as blueprints can be registered multiple times with the #: application and not everything wants to be registered #: multiple times on it, this attribute can be used to figure #: out if the blueprint was registered in the past already. self.first_registration = first_registration subdomain = self.options.get('subdomain') if subdomain is None: subdomain = self.blueprint.subdomain #: The subdomain that the blueprint should be active for, ``None`` #: otherwise. self.subdomain = subdomain url_prefix = self.options.get('url_prefix') if url_prefix is None: url_prefix = self.blueprint.url_prefix #: The prefix that should be used for all URLs defined on the #: blueprint. self.url_prefix = url_prefix #: A dictionary with URL defaults that is added to each and every #: URL that was defined with the blueprint. 
self.url_defaults = dict(self.blueprint.url_values_defaults) self.url_defaults.update(self.options.get('url_defaults', ())) def add_url_rule(self, rule, endpoint=None, view_func=None, **options): """A helper method to register a rule (and optionally a view function) to the application. The endpoint is automatically prefixed with the blueprint's name. """ if self.url_prefix: rule = self.url_prefix + rule options.setdefault('subdomain', self.subdomain) if endpoint is None: endpoint = _endpoint_from_view_func(view_func) defaults = self.url_defaults if 'defaults' in options: defaults = dict(defaults, **options.pop('defaults')) self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint), view_func, defaults=defaults, **options) class Blueprint(_PackageBoundObject): """Represents a blueprint. A blueprint is an object that records functions that will be called with the :class:`~flask.blueprints.BlueprintSetupState` later to register functions or other things on the main application. See :ref:`blueprints` for more information. .. versionadded:: 0.7 """ warn_on_modifications = False _got_registered_once = False def __init__(self, name, import_name, static_folder=None, static_url_path=None, template_folder=None, url_prefix=None, subdomain=None, url_defaults=None, root_path=None): _PackageBoundObject.__init__(self, import_name, template_folder, root_path=root_path) self.name = name self.url_prefix = url_prefix self.subdomain = subdomain self.static_folder = static_folder self.static_url_path = static_url_path self.deferred_functions = [] self.view_functions = {} if url_defaults is None: url_defaults = {} self.url_values_defaults = url_defaults def record(self, func): """Registers a function that is called when the blueprint is registered on the application. This function is called with the state as argument as returned by the :meth:`make_setup_state` method. 
""" if self._got_registered_once and self.warn_on_modifications: from warnings import warn warn(Warning('The blueprint was already registered once ' 'but is getting modified now. These changes ' 'will not show up.')) self.deferred_functions.append(func) def record_once(self, func): """Works like :meth:`record` but wraps the function in another function that will ensure the function is only called once. If the blueprint is registered a second time on the application, the function passed is not called. """ def wrapper(state): if state.first_registration: func(state) return self.record(update_wrapper(wrapper, func)) def make_setup_state(self, app, options, first_registration=False): """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState` object that is later passed to the register callback functions. Subclasses can override this to return a subclass of the setup state. """ return BlueprintSetupState(self, app, options, first_registration) def register(self, app, options, first_registration=False): """Called by :meth:`Flask.register_blueprint` to register a blueprint on the application. This can be overridden to customize the register behavior. Keyword arguments from :func:`~flask.Flask.register_blueprint` are directly forwarded to this method in the `options` dictionary. """ self._got_registered_once = True state = self.make_setup_state(app, options, first_registration) if self.has_static_folder: state.add_url_rule(self.static_url_path + '/<path:filename>', view_func=self.send_static_file, endpoint='static') for deferred in self.deferred_functions: deferred(state) def route(self, rule, **options): """Like :meth:`Flask.route` but for a blueprint. The endpoint for the :func:`url_for` function is prefixed with the name of the blueprint. 
""" def decorator(f): endpoint = options.pop("endpoint", f.__name__) self.add_url_rule(rule, endpoint, f, **options) return f return decorator def add_url_rule(self, rule, endpoint=None, view_func=None, **options): """Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for the :func:`url_for` function is prefixed with the name of the blueprint. """ if endpoint: assert '.' not in endpoint, "Blueprint endpoints should not contain dots" self.record(lambda s: s.add_url_rule(rule, endpoint, view_func, **options)) def endpoint(self, endpoint): """Like :meth:`Flask.endpoint` but for a blueprint. This does not prefix the endpoint with the blueprint name, this has to be done explicitly by the user of this method. If the endpoint is prefixed with a `.` it will be registered to the current blueprint, otherwise it's an application independent endpoint. """ def decorator(f): def register_endpoint(state): state.app.view_functions[endpoint] = f self.record_once(register_endpoint) return f return decorator def app_template_filter(self, name=None): """Register a custom template filter, available application wide. Like :meth:`Flask.template_filter` but for a blueprint. :param name: the optional name of the filter, otherwise the function name will be used. """ def decorator(f): self.add_app_template_filter(f, name=name) return f return decorator def add_app_template_filter(self, f, name=None): """Register a custom template filter, available application wide. Like :meth:`Flask.add_template_filter` but for a blueprint. Works exactly like the :meth:`app_template_filter` decorator. :param name: the optional name of the filter, otherwise the function name will be used. """ def register_template(state): state.app.jinja_env.filters[name or f.__name__] = f self.record_once(register_template) def app_template_test(self, name=None): """Register a custom template test, available application wide. Like :meth:`Flask.template_test` but for a blueprint. .. 
versionadded:: 0.10 :param name: the optional name of the test, otherwise the function name will be used. """ def decorator(f): self.add_app_template_test(f, name=name) return f return decorator def add_app_template_test(self, f, name=None): """Register a custom template test, available application wide. Like :meth:`Flask.add_template_test` but for a blueprint. Works exactly like the :meth:`app_template_test` decorator. .. versionadded:: 0.10 :param name: the optional name of the test, otherwise the function name will be used. """ def register_template(state): state.app.jinja_env.tests[name or f.__name__] = f self.record_once(register_template) def app_template_global(self, name=None): """Register a custom template global, available application wide. Like :meth:`Flask.template_global` but for a blueprint. .. versionadded:: 0.10 :param name: the optional name of the global, otherwise the function name will be used. """ def decorator(f): self.add_app_template_global(f, name=name) return f return decorator def add_app_template_global(self, f, name=None): """Register a custom template global, available application wide. Like :meth:`Flask.add_template_global` but for a blueprint. Works exactly like the :meth:`app_template_global` decorator. .. versionadded:: 0.10 :param name: the optional name of the global, otherwise the function name will be used. """ def register_template(state): state.app.jinja_env.globals[name or f.__name__] = f self.record_once(register_template) def before_request(self, f): """Like :meth:`Flask.before_request` but for a blueprint. This function is only executed before each request that is handled by a function of that blueprint. """ self.record_once(lambda s: s.app.before_request_funcs .setdefault(self.name, []).append(f)) return f def before_app_request(self, f): """Like :meth:`Flask.before_request`. Such a function is executed before each request, even if outside of a blueprint. 
""" self.record_once(lambda s: s.app.before_request_funcs .setdefault(None, []).append(f)) return f def before_app_first_request(self, f): """Like :meth:`Flask.before_first_request`. Such a function is executed before the first request to the application. """ self.record_once(lambda s: s.app.before_first_request_funcs.append(f)) return f def after_request(self, f): """Like :meth:`Flask.after_request` but for a blueprint. This function is only executed after each request that is handled by a function of that blueprint. """ self.record_once(lambda s: s.app.after_request_funcs .setdefault(self.name, []).append(f)) return f def after_app_request(self, f): """Like :meth:`Flask.after_request` but for a blueprint. Such a function is executed after each request, even if outside of the blueprint. """ self.record_once(lambda s: s.app.after_request_funcs .setdefault(None, []).append(f)) return f def teardown_request(self, f): """Like :meth:`Flask.teardown_request` but for a blueprint. This function is only executed when tearing down requests handled by a function of that blueprint. Teardown request functions are executed when the request context is popped, even when no actual request was performed. """ self.record_once(lambda s: s.app.teardown_request_funcs .setdefault(self.name, []).append(f)) return f def teardown_app_request(self, f): """Like :meth:`Flask.teardown_request` but for a blueprint. Such a function is executed when tearing down each request, even if outside of the blueprint. """ self.record_once(lambda s: s.app.teardown_request_funcs .setdefault(None, []).append(f)) return f def context_processor(self, f): """Like :meth:`Flask.context_processor` but for a blueprint. This function is only executed for requests handled by a blueprint. """ self.record_once(lambda s: s.app.template_context_processors .setdefault(self.name, []).append(f)) return f def app_context_processor(self, f): """Like :meth:`Flask.context_processor` but for a blueprint. 
Such a function is executed each request, even if outside of the blueprint. """ self.record_once(lambda s: s.app.template_context_processors .setdefault(None, []).append(f)) return f def app_errorhandler(self, code): """Like :meth:`Flask.errorhandler` but for a blueprint. This handler is used for all requests, even if outside of the blueprint. """ def decorator(f): self.record_once(lambda s: s.app.errorhandler(code)(f)) return f return decorator def url_value_preprocessor(self, f): """Registers a function as URL value preprocessor for this blueprint. It's called before the view functions are called and can modify the url values provided. """ self.record_once(lambda s: s.app.url_value_preprocessors .setdefault(self.name, []).append(f)) return f def url_defaults(self, f): """Callback function for URL defaults for this blueprint. It's called with the endpoint and values and should update the values passed in place. """ self.record_once(lambda s: s.app.url_default_functions .setdefault(self.name, []).append(f)) return f def app_url_value_preprocessor(self, f): """Same as :meth:`url_value_preprocessor` but application wide. """ self.record_once(lambda s: s.app.url_value_preprocessors .setdefault(None, []).append(f)) return f def app_url_defaults(self, f): """Same as :meth:`url_defaults` but application wide. """ self.record_once(lambda s: s.app.url_default_functions .setdefault(None, []).append(f)) return f def errorhandler(self, code_or_exception): """Registers an error handler that becomes active for this blueprint only. Please be aware that routing does not happen local to a blueprint so an error handler for 404 usually is not handled by a blueprint unless it is caused inside a view function. Another special case is the 500 internal server error which is always looked up from the application. Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator of the :class:`~flask.Flask` object. 
""" def decorator(f): self.record_once(lambda s: s.app._register_error_handler( self.name, code_or_exception, f)) return f return decorator def register_error_handler(self, code_or_exception, f): """Non-decorator version of the :meth:`errorhandler` error attach function, akin to the :meth:`~flask.Flask.register_error_handler` application-wide function of the :class:`~flask.Flask` object but for error handlers limited to this blueprint. .. versionadded:: 0.11 """ self.record_once(lambda s: s.app._register_error_handler( self.name, code_or_exception, f))
bsd-3-clause
trondhindenes/ansible
test/units/modules/network/f5/test_bigip_monitor_tcp_half_open.py
8
10279
# -*- coding: utf-8 -*- # # Copyright (c) 2017 F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import sys import pytest from nose.plugins.skip import SkipTest if sys.version_info < (2, 7): raise SkipTest("F5 Ansible modules require Python >= 2.7") from units.compat import unittest from units.compat.mock import Mock from units.compat.mock import patch from ansible.module_utils.basic import AnsibleModule try: from library.modules.bigip_monitor_tcp_half_open import Parameters from library.modules.bigip_monitor_tcp_half_open import ModuleManager from library.modules.bigip_monitor_tcp_half_open import ArgumentSpec from library.modules.bigip_monitor_tcp_half_open import HAS_F5SDK from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import iControlUnexpectedHTTPError from test.unit.modules.utils import set_module_args except ImportError: try: from ansible.modules.network.f5.bigip_monitor_tcp_half_open import Parameters from ansible.modules.network.f5.bigip_monitor_tcp_half_open import ModuleManager from ansible.modules.network.f5.bigip_monitor_tcp_half_open import ArgumentSpec from ansible.modules.network.f5.bigip_monitor_tcp_half_open import HAS_F5SDK from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError from units.modules.utils import set_module_args except ImportError: raise SkipTest("F5 Ansible modules require the f5-sdk Python library") fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') fixture_data = {} def load_fixture(name): path = os.path.join(fixture_path, name) if path in fixture_data: return fixture_data[path] with open(path) as f: data = f.read() try: data = json.loads(data) except Exception: pass fixture_data[path] = data return 
data class TestParameters(unittest.TestCase): def test_module_parameters(self): args = dict( name='foo', parent='parent', ip='10.10.10.10', port=80, interval=20, timeout=30, time_until_up=60, partition='Common' ) p = Parameters(params=args) assert p.name == 'foo' assert p.parent == '/Common/parent' assert p.ip == '10.10.10.10' assert p.port == 80 assert p.type == 'tcp_half_open' assert p.destination == '10.10.10.10:80' assert p.interval == 20 assert p.timeout == 30 assert p.time_until_up == 60 def test_module_parameters_ints_as_strings(self): args = dict( name='foo', parent='parent', ip='10.10.10.10', port=80, interval='20', timeout='30', time_until_up='60', partition='Common' ) p = Parameters(params=args) assert p.name == 'foo' assert p.parent == '/Common/parent' assert p.ip == '10.10.10.10' assert p.port == 80 assert p.type == 'tcp_half_open' assert p.destination == '10.10.10.10:80' assert p.interval == 20 assert p.timeout == 30 assert p.time_until_up == 60 def test_api_parameters(self): args = dict( name='foo', defaultsFrom='/Common/parent', destination='10.10.10.10:80', interval=20, timeout=30, timeUntilUp=60 ) p = Parameters(params=args) assert p.name == 'foo' assert p.parent == '/Common/parent' assert p.ip == '10.10.10.10' assert p.port == 80 assert p.type == 'tcp_half_open' assert p.destination == '10.10.10.10:80' assert p.interval == 20 assert p.timeout == 30 assert p.time_until_up == 60 class TestManager(unittest.TestCase): def setUp(self): self.spec = ArgumentSpec() def test_create_monitor(self, *args): set_module_args(dict( name='foo', ip='10.10.10.10', port=80, interval=20, timeout=30, time_until_up=60, server='localhost', password='password', user='admin' )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) # Override methods in the specific type of manager mm = ModuleManager(module=module) mm.exists = Mock(side_effect=[False, True]) mm.create_on_device = Mock(return_value=True) results = 
mm.exec_module() assert results['changed'] is True def test_create_monitor_idempotent(self, *args): set_module_args(dict( name='foo', ip='10.10.10.10', port=80, interval=20, timeout=30, time_until_up=60, server='localhost', password='password', user='admin' )) current = Parameters(params=load_fixture('load_ltm_monitor_tcp_half_open.json')) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) # Override methods in the specific type of manager mm = ModuleManager(module=module) mm.exists = Mock(return_value=True) mm.read_current_from_device = Mock(return_value=current) results = mm.exec_module() assert results['changed'] is False def test_update_interval(self, *args): set_module_args(dict( name='foo', interval=10, server='localhost', password='password', user='admin' )) current = Parameters(params=load_fixture('load_ltm_monitor_tcp_half_open.json')) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) # Override methods in the specific type of manager mm = ModuleManager(module=module) mm.exists = Mock(return_value=True) mm.read_current_from_device = Mock(return_value=current) mm.update_on_device = Mock(return_value=True) results = mm.exec_module() assert results['changed'] is True assert results['interval'] == 10 def test_update_interval_larger_than_existing_timeout(self, *args): set_module_args(dict( name='foo', interval=30, server='localhost', password='password', user='admin' )) current = Parameters(params=load_fixture('load_ltm_monitor_tcp_half_open.json')) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) # Override methods in the specific type of manager mm = ModuleManager(module=module) mm.exists = Mock(return_value=True) mm.read_current_from_device = Mock(return_value=current) mm.update_on_device = Mock(return_value=True) with pytest.raises(F5ModuleError) as ex: 
mm.exec_module() assert "must be less than" in str(ex) def test_update_interval_larger_than_new_timeout(self, *args): set_module_args(dict( name='foo', interval=10, timeout=5, server='localhost', password='password', user='admin' )) current = Parameters(params=load_fixture('load_ltm_monitor_tcp_half_open.json')) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) # Override methods in the specific type of manager mm = ModuleManager(module=module) mm.exists = Mock(return_value=True) mm.read_current_from_device = Mock(return_value=current) mm.update_on_device = Mock(return_value=True) with pytest.raises(F5ModuleError) as ex: mm.exec_module() assert "must be less than" in str(ex) def test_update_timeout(self, *args): set_module_args(dict( name='foo', timeout=300, server='localhost', password='password', user='admin' )) current = Parameters(params=load_fixture('load_ltm_monitor_tcp_half_open.json')) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) # Override methods in the specific type of manager mm = ModuleManager(module=module) mm.exists = Mock(return_value=True) mm.read_current_from_device = Mock(return_value=current) mm.update_on_device = Mock(return_value=True) results = mm.exec_module() assert results['changed'] is True assert results['timeout'] == 300 def test_update_time_until_up(self, *args): set_module_args(dict( name='foo', time_until_up=300, server='localhost', password='password', user='admin' )) current = Parameters(params=load_fixture('load_ltm_monitor_tcp_half_open.json')) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) # Override methods in the specific type of manager mm = ModuleManager(module=module) mm.exists = Mock(return_value=True) mm.read_current_from_device = Mock(return_value=current) mm.update_on_device = Mock(return_value=True) results = 
mm.exec_module() assert results['changed'] is True assert results['time_until_up'] == 300
gpl-3.0
LAMAC-IFUNAM/lafrioc-electronics-labjack
Python_LJM/Examples/Stream/ProgContV0.py
2
6604
""" Version 0 This program have the first steps to make one to control a MOT """ from labjack import ljm import time import sys from datetime import datetime import numpy as np MAX_REQUESTS = 4000 # The number of eStreamRead calls that will be performed. # Open first found LabJack handle = ljm.openS("ANY", "ANY", "ANY") #handle = ljm.open(ljm.constants.dtANY, ljm.constants.ctANY, "ANY") info = ljm.getHandleInfo(handle) print("Opened a LabJack with Device type: %i, Connection type: %i,\n" \ "Serial number: %i, IP address: %s, Port: %i,\nMax bytes per MB: %i" % \ (info[0], info[1], info[2], ljm.numberToIP(info[3]), info[4], info[5])) # ### FIO LOW ### #set all FIO digital channels to low state (prevent malfunction of digital channels) statelow=0 for i in range(0,8): num=str(i) chF="FIO"+num ljm.eWriteName(handle, chF, statelow) # ### FIO LOW ### # Setup Stream Out OUT_NAMES = ["FIO_STATE", "DAC1", "DAC0"] NUM_OUT_CHANNELS = len(OUT_NAMES) outAddress = ljm.nameToAddress(OUT_NAMES[0])[0] # FIO ### # Allocate memory for the stream-out buffer ljm.eWriteName(handle, "STREAM_OUT0_TARGET", outAddress) ljm.eWriteName(handle, "STREAM_OUT0_BUFFER_SIZE", 512) ljm.eWriteName(handle, "STREAM_OUT0_ENABLE", 1) matriz = np.array([[1,1,1,1],[0,0,0,0],[1,1,1,1], [0,0,0,0],[1,1,1,1],[0,0,0,0]]) rowsdim=matriz.shape[0] rows=[] for i in range(0,rowsdim): rows.append(''.join(map(str,matriz[i,]))) #each row contain the state of all FIO at a given time # Write values to the stream-out buffer ljm.eWriteName(handle, "STREAM_OUT0_LOOP_SIZE", rowsdim) for r in range(0,rowsdim): ljm.eWriteName(handle, "STREAM_OUT0_BUFFER_U16", int(rows[r],2)) ljm.eWriteName(handle, "STREAM_OUT0_SET_LOOP", 1) print("STREAM_OUT0_BUFFER_STATUS = %f" % (ljm.eReadName(handle, "STREAM_OUT0_BUFFER_STATUS"))) # FIO ### # DAC1 ### outAddress = ljm.nameToAddress(OUT_NAMES[1])[0] # Allocate memory for the stream-out buffer ljm.eWriteName(handle, "STREAM_OUT1_TARGET", outAddress) ljm.eWriteName(handle, 
"STREAM_OUT1_BUFFER_SIZE", 512) ljm.eWriteName(handle, "STREAM_OUT1_ENABLE", 1) vminD1=0 vmaxD1=5 stepsD1=3 volt_stepD1=(vmaxD1-vminD1)/(stepsD1-1) volt_arrD1=np.linspace(vminD1,vmaxD1,stepsD1) # Write values to the stream-out buffer NumBuf="1" ljm.eWriteName(handle, "STREAM_OUT"+NumBuf+"_LOOP_SIZE", stepsD1) for v1 in range(0,stepsD1): ljm.eWriteName(handle, "STREAM_OUT"+NumBuf+"_BUFFER_F32", volt_arrD1[v1]) ljm.eWriteName(handle, "STREAM_OUT2_SET_LOOP", 1) print("STREAM_OUT1_BUFFER_STATUS = %f" % (ljm.eReadName(handle, "STREAM_OUT1_BUFFER_STATUS"))) # DAC1 ### # DAC0 ### outAddress = ljm.nameToAddress(OUT_NAMES[2])[0] # Allocate memory for the stream-out buffer ljm.eWriteName(handle, "STREAM_OUT2_TARGET", outAddress) ljm.eWriteName(handle, "STREAM_OUT2_BUFFER_SIZE", 512) ljm.eWriteName(handle, "STREAM_OUT2_ENABLE", 1) vminD0=0 vmaxD0=5 stepsD0=3 volt_stepD0=(vmaxD0-vminD0)/(stepsD0-1) volt_arrD0=np.linspace(vminD0,vmaxD0,stepsD0) # Write values to the stream-out buffer NumBuf="2" ljm.eWriteName(handle, "STREAM_OUT"+NumBuf+"_LOOP_SIZE", stepsD0) for v1 in range(0,stepsD0): ljm.eWriteName(handle, "STREAM_OUT"+NumBuf+"_BUFFER_F32", volt_arrD0[v1]) ljm.eWriteName(handle, "STREAM_OUT2_SET_LOOP", 1) print("STREAM_OUT2_BUFFER_STATUS = %f" % (ljm.eReadName(handle, "STREAM_OUT2_BUFFER_STATUS"))) # DAC0 ### # Stream Configuration POS_IN_NAMES = ["AIN0", "AIN1"] NUM_IN_CHANNELS = len(POS_IN_NAMES) TOTAL_NUM_CHANNELS = NUM_IN_CHANNELS + NUM_OUT_CHANNELS #Add positive channels to scan list aScanList = ljm.namesToAddresses(NUM_IN_CHANNELS, POS_IN_NAMES)[0] scanRate = 5000 scansPerRead = 60 # Add the stream out token 4800 to the end aScanList.extend([4800,4801,4802]) try: # Configure the analog inputs' negative channel, range, settling time and # resolution. # Note when streaming, negative channels and ranges can be configured for # individual analog inputs, but the stream has only one settling time and # resolution. 
aNames = ["AIN_ALL_NEGATIVE_CH", "AIN_ALL_RANGE", "STREAM_SETTLING_US", "STREAM_RESOLUTION_INDEX"] aValues = [ljm.constants.GND, 10.0, 0, 0] #single-ended, +/-10V, 0 (default), #0 (default) ljm.eWriteNames(handle, len(aNames), aNames, aValues) # Configure and start stream print(aScanList[0:TOTAL_NUM_CHANNELS]) scanRate = ljm.eStreamStart(handle, scansPerRead, TOTAL_NUM_CHANNELS, aScanList, scanRate) print("\nStream started with a scan rate of %0.0f Hz." % scanRate) print("\nPerforming %i stream reads." % MAX_REQUESTS) start = datetime.now() totScans = 0 totSkip = 0 # Total skipped samples i = 1 while i <= MAX_REQUESTS: ret = ljm.eStreamRead(handle) # Note that the Python eStreamData will return a data list of size # scansPerRead*TOTAL_NUM_CHANNELS, but only the first # scansPerRead*NUM_IN_CHANNELS samples in the list are valid. Output # channels are not included in the eStreamRead's returned data. data = ret[0][0:scansPerRead*NUM_IN_CHANNELS] scans = len(data)/NUM_IN_CHANNELS totScans += scans # Count the skipped samples which are indicated by -9999 values. Missed # samples occur after a device's stream buffer overflows and are # reported after auto-recover mode ends. 
curSkip = data.count(-9999.0) totSkip += curSkip print("\neStreamRead #%i, %i scans" % (i, scans)) for j in range(0, scansPerRead): ainStr = "" for k in range(0, NUM_IN_CHANNELS): ainStr += "%s: %0.5f, " % (POS_IN_NAMES[k], data[j*NUM_IN_CHANNELS + k]) print(" %s" % (ainStr)) print(" Scans Skipped = %0.0f, Scan Backlogs: Device = %i, LJM = " \ "%i" % (curSkip/NUM_IN_CHANNELS, ret[1], ret[2])) i += 1 end = datetime.now() print("\nTotal scans = %i" % (totScans)) tt = (end-start).seconds + float((end-start).microseconds)/1000000 print("Time taken = %f seconds" % (tt)) print("LJM Scan Rate = %f scans/second" % (scanRate)) print("Timed Scan Rate = %f scans/second" % (totScans/tt)) print("Timed Sample Rate = %f samples/second" % (totScans*NUM_IN_CHANNELS/tt)) print("Skipped scans = %0.0f" % (totSkip/NUM_IN_CHANNELS)) except ljm.LJMError: ljme = sys.exc_info()[1] print(ljme) except Exception: e = sys.exc_info()[1] print(e) print("\nStop Stream") ljm.eStreamStop(handle) # Close handle ljm.close(handle)
gpl-3.0
elmadjian/pcs5735
aula1/logistic_regression.py
1
3369
#Author: Carlos Eduardo Leão Elmadjian import sys import matplotlib.pyplot as plt import matplotlib.patches as mpatches import numpy as np theta_list = [] x_list = [] y_list = [] def main(): if len(sys.argv) != 2: print("modo de usar: <este_programa> <arquivo_csv>") sys.exit() csv_file = sys.argv[1] with open(csv_file, "r") as arquivo: classes = arquivo.readline().split(",") theta_list = [0.0 for i in range(len(classes))] for line in arquivo: values = line.split(",") curr_x = [float(i) for i in values[:-1]] curr_x.append(1.0) x_list.append(curr_x) y_list.append(1.0) if values[-1].startswith("yes") else y_list.append(0.0) logistic_regression(theta_list, x_list, y_list, 0.0005, 0.0000001) plot(theta_list, x_list, y_list) #The logistic regression algorithm using SGD #------------------------------------------- def logistic_regression(theta_list, x_list, y_list, alpha, epsilon): J_prev = 0 J_curr = J(theta_list, x_list, y_list) count = 0 while abs(J_curr - J_prev) > epsilon: if count == 10000: print("too much iterations") break count += 1 for j in range(len(theta_list)): for i in range(len(x_list)): diff = (h_theta(theta_list, x_list[i]) - y_list[i]) theta_list[j] = theta_list[j] - alpha * diff * x_list[i][j] J_prev = J_curr J_curr = J(theta_list, x_list, y_list) #Calculates the minimum cost function #------------------------------------ def J(theta_list, x_list, y_list): sigma = 0 for i in range(len(x_list)): sigma += (h_theta(theta_list, x_list[i]) - y_list[i])**2 return sigma / 2 #Calculates h_theta #------------------- def h_theta(theta, x): return 1.0/(1.0 + np.exp(-np.dot(theta, x))) #Binary classifier #------------------ def predict(theta, x, y): return (h_theta(theta, x)**y) * ((1.0-h_theta(theta, x))**(1.0-y)) #DEBUG: Plot our findings #------------------------ def plot(theta_list, x_list, y_list): new_x_list = [i[0] for i in x_list] new_y_list = [i[1] for i in x_list] hit, p1, p2, p3, p4 = 0, 0, 0, 0, 0 for i in range(len(y_list)): if y_list[i] == 1.0: if 
predict(theta_list, x_list[i], y_list[i]) >= 0.5: p1, = plt.plot(np.dot(theta_list, x_list[i]), y_list[i], 'go') hit += 1 else: p2, = plt.plot(np.dot(theta_list, x_list[i]), y_list[i], 'gx') elif y_list[i] == 0.0 : if predict(theta_list, x_list[i], y_list[i]) >= 0.5: p3, = plt.plot(np.dot(theta_list, x_list[i]), y_list[i], 'ro') hit += 1 else: p4, = plt.plot(np.dot(theta_list, x_list[i]), y_list[i], 'rx') plt.title("Regressão logística sobre os dados de 'students.csv'") plt.xlabel("z") plt.ylabel("g(z)") hit_true = 'P(y=admitido) = admitido' hit_false = 'P(y=admitido) = não admitido' miss_true = 'P(y=não admitido) = não admitido' miss_false ='P(y=não admitido) = admitido' plt.legend([p1,p2,p3,p4],[hit_true, hit_false, miss_true, miss_false]) print("hit rate:", hit/len(y_list)) plt.show() #----------------------- if __name__=="__main__": main()
mpl-2.0
jonathf/chaospy
chaospy/distributions/collection/log_normal.py
1
3933
"""Log-Normal probability distribution.""" import numpy from scipy import special from ..baseclass import SimpleDistribution, ShiftScaleDistribution class log_normal(SimpleDistribution): def __init__(self, a=1): super(log_normal, self).__init__(dict(a=a)) def _lower(self, a): return 0. def _upper(self, a): return numpy.e**(a*6.37) def _pdf(self, x, a): out = (numpy.e**(-numpy.log(x+(1-x)*(x<=0))**2/(2*a*a))/ ((x+(1-x)*(x<=0))*a*numpy.sqrt(2*numpy.pi))*(x>0)) return out def _cdf(self, x, a): return special.ndtr(numpy.log(x+(1-x)*(x<=0))/a)*(x>0) def _ppf(self, x, a): return numpy.e**(a*special.ndtri(x)) def _mom(self, k, a): return numpy.e**(.5*a*a*k*k) def _ttr(self, n, a): """Stieltjes-Wigert.""" return ( (numpy.e**(n*a*a)*(numpy.e**(a*a)+1)-1)*numpy.e**(.5*(2*n-1)*a*a), (numpy.e**(n*a*a)-1)*numpy.e**((3*n-2)*a*a) ) class LogNormal(ShiftScaleDistribution): R""" Log-normal distribution Args: mu (float, Distribution): Mean in the normal distribution. Overlaps with scale by mu=log(scale) sigma (float, Distribution): Standard deviation of the normal distribution. shift (float, Distribution): Location of the lower bound. scale (float, Distribution): Scale parameter. Overlaps with mu by scale=e**mu Examples: >>> distribution = chaospy.LogNormal(0, 0.1) >>> distribution LogNormal(mu=0, sigma=0.1) >>> uloc = numpy.linspace(0, 1, 6) >>> uloc array([0. , 0.2, 0.4, 0.6, 0.8, 1. ]) >>> xloc = distribution.inv(uloc) >>> xloc.round(3) array([0. , 0.919, 0.975, 1.026, 1.088, 1.891]) >>> numpy.allclose(distribution.fwd(xloc), uloc) True >>> distribution.pdf(xloc).round(3) array([0. , 3.045, 3.963, 3.767, 2.574, 0. ]) >>> distribution.sample(4).round(3) array([1.04 , 0.887, 1.179, 0.996]) >>> distribution.mom(1).round(3) 1.005 >>> distribution.ttr([0, 1, 2, 3]).round(3) array([[1.005, 1.035, 1.067, 1.098], [0. 
, 0.01 , 0.021, 0.033]]) """ def __init__(self, mu=0, sigma=1, shift=0, scale=1): dist = ShiftScaleDistribution(dist=log_normal(sigma), scale=numpy.e**mu) super(LogNormal, self).__init__( dist=dist, scale=scale, shift=shift, repr_args=["mu=%s" % mu, "sigma=%s" % sigma], ) class Gilbrat(ShiftScaleDistribution): """ Gilbrat distribution. Standard log-normal distribution Args: scale (float, Distribution): Scaling parameter shift (float, Distribution): Location parameter Examples: >>> distribution = chaospy.Gilbrat(scale=0.0015) >>> distribution Gilbrat(scale=0.0015) >>> uloc = numpy.linspace(0, 1, 6) >>> uloc array([0. , 0.2, 0.4, 0.6, 0.8, 1. ]) >>> xloc = distribution.inv(uloc) >>> xloc.round(3) array([0. , 0.001, 0.001, 0.002, 0.003, 0.876]) >>> numpy.allclose(distribution.fwd(xloc), uloc) True >>> distribution.pdf(xloc).round(3) array([ 0. , 433.031, 331.825, 199.919, 80.444, 0. ]) >>> distribution.sample(4).round(4) array([0.0022, 0.0005, 0.0078, 0.0014]) >>> distribution.mom(1).round(8) 0.00247308 >>> distribution.ttr([0, 1, 2]).round(4) array([[0.0025, 0.0225, 0.178 ], [0. , 0. , 0.0008]]) """ def __init__(self, scale=1, shift=0): super(Gilbrat, self).__init__( dist=log_normal(1), scale=scale, shift=shift, repr_args=[], )
mit
orionzhou/rgeneclust
prepare.pfam.py
1
1906
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import sys import os.path as op import argparse import urllib2 from urlparse import urlparse from ftplib import FTP def internet_on(): try: resp = urllib2.urlopen("http://www.google.com/", timeout = 3) return True except urllib2.URLError as err: pass return False def download_from_url(url, fo): if not internet_on(): print "no internet connection" sys.exit(1) o = urlparse(url) fname = op.basename(o.path) if(o.scheme == 'ftp'): print "downloading " + url ftp = FTP(o.netloc) ftp.login() ftp.cwd(op.dirname(o.path)) ftp.retrbinary('RETR '+fname, open(fo, 'wb').write) else: print "%s: scheme not supported" return fo def uncompress(f_gzipped): fo, ext = op.splitext(f_gzipped) if(ext == '.gz'): print "uncompressing " + f_gzipped os.system("gunzip " + f_gzipped) else: print "not a valid gzip file: " + f_gzipped sys.exit(1) return fo def hmmpress(f_hmm): fname, ext = op.splitext(f_hmm) if ext == '.hmm': print "prepare HMM database for scan..." os.system("hmmpress " + f_hmm) else: print "not a valide HMM file: " + f_hmm sys.exit(1) if __name__ == '__main__': parser = argparse.ArgumentParser( description = 'download and prepare Pfam database for hmmscan' ) parser.add_argument( 'out', default = "test", help = 'output directory (default: "test")' ) args = parser.parse_args() dirw = args.out if not op.exists(dirw): os.makedirs(dirw) url = 'ftp://ftp.ebi.ac.uk/pub/databases/Pfam/current_release/Pfam-A.hmm.gz' f_pfam_gz = op.join(dirw, op.basename(url)) download_from_url(url, f_pfam_gz) f_pfam = op.splitext(f_pfam_gz)[0] uncompress(f_pfam_gz) hmmpress(f_pfam)
apache-2.0
ngpestelos/ansible
test/integration/cleanup_rax.py
229
6516
#!/usr/bin/env python
"""Clean up Rackspace cloud resources whose names match a regex.

Intended for removing leftovers from integration test runs.  Targets
Python 2 (raw_input, exception .message attribute).
"""

import os
import re
import yaml
import argparse

try:
    import pyrax
    HAS_PYRAX = True
except ImportError:
    HAS_PYRAX = False


def rax_list_iterator(svc, *args, **kwargs):
    """Page through a pyrax list call using marker-based pagination.

    Repeatedly re-issues the list call with ``marker`` set to the last
    seen id, dropping the duplicated boundary item, until a page with
    fewer than two items comes back.
    """
    method = kwargs.pop('method', 'list')
    items = getattr(svc, method)(*args, **kwargs)
    while items:
        retrieved = getattr(svc, method)(*args, marker=items[-1].id,
                                         **kwargs)
        if items and retrieved and items[-1].id == retrieved[0].id:
            # The marker item is returned again as the first element of
            # the next page; avoid double-counting it.
            del items[-1]
        items.extend(retrieved)
        if len(retrieved) < 2:
            break
    return items


def parse_args():
    """Parse command line options (--yes, --match)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-y', '--yes', action='store_true', dest='assumeyes',
                        default=False, help="Don't prompt for confirmation")
    parser.add_argument('--match', dest='match_re',
                        default='^ansible-testing',
                        help='Regular expression used to find resources '
                             '(default: %(default)s)')
    return parser.parse_args()


def authenticate():
    """Read ./credentials.yml and authenticate against Rackspace."""
    try:
        with open(os.path.realpath('./credentials.yml')) as f:
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary constructors; credentials.yml is a local, trusted
            # file here, but yaml.safe_load would be the safer choice.
            credentials = yaml.load(f)
    except Exception as e:
        raise SystemExit(e)

    try:
        pyrax.set_credentials(credentials.get('rackspace_username'),
                              credentials.get('rackspace_api_key'))
    except Exception as e:
        raise SystemExit(e)


def prompt_and_delete(item, prompt, assumeyes):
    """Delete/terminate *item*, asking for confirmation unless *assumeyes*."""
    if not assumeyes:
        assumeyes = raw_input(prompt).lower() == 'y'
    assert hasattr(item, 'delete') or hasattr(item, 'terminate'), \
        "Class <%s> has no delete or terminate attribute" % item.__class__
    if assumeyes:
        if hasattr(item, 'delete'):
            item.delete()
            print ("Deleted %s" % item)
        if hasattr(item, 'terminate'):
            item.terminate()
            print ("Terminated %s" % item)


def delete_rax(args):
    """Function for deleting CloudServers"""
    print ("--- Cleaning CloudServers matching '%s'" % args.match_re)
    search_opts = dict(name='^%s' % args.match_re)
    for region in pyrax.identity.services.compute.regions:
        cs = pyrax.connect_to_cloudservers(region=region)
        servers = rax_list_iterator(cs.servers, search_opts=search_opts)
        for server in servers:
            prompt_and_delete(server,
                              'Delete matching %s? [y/n]: ' % server,
                              args.assumeyes)


def delete_rax_clb(args):
    """Function for deleting Cloud Load Balancers"""
    print ("--- Cleaning Cloud Load Balancers matching '%s'" % args.match_re)
    for region in pyrax.identity.services.load_balancer.regions:
        clb = pyrax.connect_to_cloud_loadbalancers(region=region)
        for lb in rax_list_iterator(clb):
            if re.search(args.match_re, lb.name):
                prompt_and_delete(lb,
                                  'Delete matching %s? [y/n]: ' % lb,
                                  args.assumeyes)


def delete_rax_keypair(args):
    """Function for deleting Rackspace Key pairs"""
    print ("--- Cleaning Key Pairs matching '%s'" % args.match_re)
    for region in pyrax.identity.services.compute.regions:
        cs = pyrax.connect_to_cloudservers(region=region)
        for keypair in cs.keypairs.list():
            if re.search(args.match_re, keypair.name):
                prompt_and_delete(keypair,
                                  'Delete matching %s? [y/n]: ' % keypair,
                                  args.assumeyes)


def delete_rax_network(args):
    """Function for deleting Cloud Networks"""
    print ("--- Cleaning Cloud Networks matching '%s'" % args.match_re)
    for region in pyrax.identity.services.network.regions:
        cnw = pyrax.connect_to_cloud_networks(region=region)
        for network in cnw.list():
            if re.search(args.match_re, network.name):
                prompt_and_delete(network,
                                  'Delete matching %s? [y/n]: ' % network,
                                  args.assumeyes)


def delete_rax_cbs(args):
    """Function for deleting Cloud Block Storage volumes"""
    # BUG FIX: the docstring said "Cloud Networks" (copy/paste from
    # delete_rax_network) even though this function handles block storage.
    # NOTE(review): the loop below still iterates the *network* service
    # regions to reach block storage -- this looks like part of the same
    # copy/paste; confirm the block-storage region list matches before
    # changing the behavior.
    print ("--- Cleaning Cloud Block Storage matching '%s'" % args.match_re)
    for region in pyrax.identity.services.network.regions:
        cbs = pyrax.connect_to_cloud_blockstorage(region=region)
        for volume in cbs.list():
            if re.search(args.match_re, volume.name):
                prompt_and_delete(volume,
                                  'Delete matching %s? [y/n]: ' % volume,
                                  args.assumeyes)


def delete_rax_cdb(args):
    """Function for deleting Cloud Databases"""
    print ("--- Cleaning Cloud Databases matching '%s'" % args.match_re)
    for region in pyrax.identity.services.database.regions:
        cdb = pyrax.connect_to_cloud_databases(region=region)
        for db in rax_list_iterator(cdb):
            if re.search(args.match_re, db.name):
                prompt_and_delete(db,
                                  'Delete matching %s? [y/n]: ' % db,
                                  args.assumeyes)


def _force_delete_rax_scaling_group(manager):
    """Return a replacement _delete that appends ?force=true to the URI."""
    def wrapped(uri):
        manager.api.method_delete('%s?force=true' % uri)
    return wrapped


def delete_rax_scaling_group(args):
    """Function for deleting Autoscale Groups"""
    print ("--- Cleaning Autoscale Groups matching '%s'" % args.match_re)
    for region in pyrax.identity.services.autoscale.regions:
        asg = pyrax.connect_to_autoscale(region=region)
        for group in rax_list_iterator(asg):
            if re.search(args.match_re, group.name):
                # Force-delete so groups with live members are removed too.
                group.manager._delete = \
                    _force_delete_rax_scaling_group(group.manager)
                prompt_and_delete(group,
                                  'Delete matching %s? [y/n]: ' % group,
                                  args.assumeyes)


def main():
    """Run every delete_rax* cleanup function in alphabetical order."""
    if not HAS_PYRAX:
        raise SystemExit('The pyrax python module is required for this script')

    args = parse_args()
    authenticate()

    # Discover the cleanup functions by naming convention.
    funcs = [f for n, f in globals().items() if n.startswith('delete_rax')]
    for func in sorted(funcs, key=lambda f: f.__name__):
        try:
            func(args)
        except Exception as e:
            print ("---- %s failed (%s)" % (func.__name__, e.message))


if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print ('\nExiting...')
gpl-3.0
hynekcer/django
django/contrib/contenttypes/admin.py
191
5385
from __future__ import unicode_literals

from functools import partial

from django.contrib.admin.checks import InlineModelAdminChecks
from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.forms import (
    BaseGenericInlineFormSet, generic_inlineformset_factory,
)
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.forms import ALL_FIELDS
from django.forms.models import modelform_defines_fields


class GenericInlineModelAdminChecks(InlineModelAdminChecks):
    """System checks for inlines based on GenericForeignKey rather than FK."""

    def _check_exclude_of_parent_model(self, obj, parent_model):
        # There's no FK to exclude, so no exclusion checks are required.
        return []

    def _check_relation(self, obj, parent_model):
        # There's no FK, but we do need to confirm that the ct_field and ct_fk_field are valid,
        # and that they are part of a GenericForeignKey.

        # Collect every GenericForeignKey declared on the inline's model.
        gfks = [
            f for f in obj.model._meta.virtual_fields
            if isinstance(f, GenericForeignKey)
        ]
        if len(gfks) == 0:
            # admin.E301: the inline's model has no GFK at all.
            return [
                checks.Error(
                    "'%s.%s' has no GenericForeignKey." % (
                        obj.model._meta.app_label, obj.model._meta.object_name
                    ),
                    hint=None,
                    obj=obj.__class__,
                    id='admin.E301'
                )
            ]
        else:
            # Check that the ct_field and ct_fk_fields exist
            try:
                obj.model._meta.get_field(obj.ct_field)
            except FieldDoesNotExist:
                # admin.E302: ct_field names a non-existent field.
                return [
                    checks.Error(
                        "'ct_field' references '%s', which is not a field on '%s.%s'." % (
                            obj.ct_field, obj.model._meta.app_label, obj.model._meta.object_name
                        ),
                        hint=None,
                        obj=obj.__class__,
                        id='admin.E302'
                    )
                ]

            try:
                obj.model._meta.get_field(obj.ct_fk_field)
            except FieldDoesNotExist:
                # admin.E303: ct_fk_field names a non-existent field.
                return [
                    checks.Error(
                        "'ct_fk_field' references '%s', which is not a field on '%s.%s'." % (
                            obj.ct_fk_field, obj.model._meta.app_label, obj.model._meta.object_name
                        ),
                        hint=None,
                        obj=obj.__class__,
                        id='admin.E303'
                    )
                ]

            # There's one or more GenericForeignKeys; make sure that one of them
            # uses the right ct_field and ct_fk_field.
            for gfk in gfks:
                if gfk.ct_field == obj.ct_field and gfk.fk_field == obj.ct_fk_field:
                    return []

            # admin.E304: fields exist but no GFK ties them together.
            return [
                checks.Error(
                    "'%s.%s' has no GenericForeignKey using content type field '%s' and object ID field '%s'." % (
                        obj.model._meta.app_label, obj.model._meta.object_name, obj.ct_field, obj.ct_fk_field
                    ),
                    hint=None,
                    obj=obj.__class__,
                    id='admin.E304'
                )
            ]


class GenericInlineModelAdmin(InlineModelAdmin):
    """Base class for admin inlines attached via a GenericForeignKey."""

    # Names of the content-type and object-id fields on the inline's model.
    ct_field = "content_type"
    ct_fk_field = "object_id"
    formset = BaseGenericInlineFormSet

    checks_class = GenericInlineModelAdminChecks

    def get_formset(self, request, obj=None, **kwargs):
        """Build the generic inline formset class for this admin inline."""
        if 'fields' in kwargs:
            fields = kwargs.pop('fields')
        else:
            fields = flatten_fieldsets(self.get_fieldsets(request, obj))
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        # Read-only fields must not appear in the editable formset.
        exclude.extend(self.get_readonly_fields(request, obj))
        if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # GenericInlineModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)
        exclude = exclude or None
        can_delete = self.can_delete and self.has_delete_permission(request, obj)
        defaults = {
            "ct_field": self.ct_field,
            "fk_field": self.ct_fk_field,
            "form": self.form,
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
            "formset": self.formset,
            "extra": self.get_extra(request, obj),
            "can_delete": can_delete,
            "can_order": False,
            "fields": fields,
            "min_num": self.get_min_num(request, obj),
            "max_num": self.get_max_num(request, obj),
            "exclude": exclude
        }
        defaults.update(kwargs)

        if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
            # Without explicit fields and with a form that declares none,
            # fall back to editing all fields.
            defaults['fields'] = ALL_FIELDS

        return generic_inlineformset_factory(self.model, **defaults)


class GenericStackedInline(GenericInlineModelAdmin):
    # Stacked (vertical) rendering of the generic inline.
    template = 'admin/edit_inline/stacked.html'


class GenericTabularInline(GenericInlineModelAdmin):
    # Tabular (table-row) rendering of the generic inline.
    template = 'admin/edit_inline/tabular.html'
bsd-3-clause
tunneln/CarnotKE
jyhton/Lib/test/test_mutants.py
24
8766
# NOTE: Python 2 test module (print statement, cmp, xrange, backticks).
# It deliberately mutates dicts *during* comparison to stress-test the
# interpreter; statement order and mutation timing are load-bearing.

# for Jython's use of CHM, removed pathological case of
# machiavelli. this may or may not legal without some locking in the
# underlying java code, in any event, it's not so interesting right
# now

from test.test_support import verbose, TESTFN
import random
import os

# From SF bug #422121:  Insecurities in dict comparison.

# Safety of code doing comparisons has been an historical Python weak spot.
# The problem is that comparison of structures written in C *naturally*
# wants to hold on to things like the size of the container, or "the
# biggest" containee so far, across a traversal of the container; but
# code to do containee comparisons can call back into Python and mutate
# the container in arbitrary ways while the C loop is in midstream.  If the
# C code isn't extremely paranoid about digging things out of memory on
# each trip, and artificially boosting refcounts for the duration, anything
# from infinite loops to OS crashes can result (yes, I use Windows <wink>).
#
# The other problem is that code designed to provoke a weakness is usually
# white-box code, and so catches only the particular vulnerabilities the
# author knew to protect against.  For example, Python's list.sort() code
# went thru many iterations as one "new" vulnerability after another was
# discovered.
#
# So the dict comparison test here uses a black-box approach instead,
# generating dicts of various sizes at random, and performing random
# mutations on them at random times.  This proved very effective,
# triggering at least six distinct failure modes the first 20 times I
# ran it.  Indeed, at the start, the driver never got beyond 6 iterations
# before the test died.

# The dicts are global to make it easy to mutate tham from within functions.
dict1 = {}
dict2 = {}

# The current set of keys in dict1 and dict2.  These are materialized as
# lists to make it easy to pick a dict key at random.
dict1keys = []
dict2keys = []

# Global flag telling maybe_mutate() whether to *consider* mutating.
mutate = 0

# If global mutate is true, consider mutating a dict.  May or may not
# mutate a dict even if mutate is true.  If it does decide to mutate a
# dict, it picks one of {dict1, dict2} at random, and deletes a random
# entry from it; or, more rarely, adds a random element.

def maybe_mutate():
    global mutate
    if not mutate:
        return
    if random.random() < 0.5:
        return

    # Pick one of the two global dicts at random.
    if random.random() < 0.5:
        target, keys = dict1, dict1keys
    else:
        target, keys = dict2, dict2keys

    if random.random() < 0.2:
        # Insert a new key.
        mutate = 0   # disable mutation until key inserted
        while 1:
            newkey = Horrid(random.randrange(100))
            if newkey not in target:
                break
        target[newkey] = Horrid(random.randrange(100))
        keys.append(newkey)
        mutate = 1

    elif keys:
        # Delete a key at random.
        i = random.randrange(len(keys))
        key = keys[i]
        del target[key]
        # CAUTION:  don't use keys.remove(key) here.  Or do <wink>.  The
        # point is that .remove() would trigger more comparisons, and so
        # also more calls to this routine.  We're mutating often enough
        # without that.
        del keys[i]

# A horrid class that triggers random mutations of dict1 and dict2 when
# instances are compared.

class Horrid:
    def __init__(self, i):
        # Comparison outcomes are determined by the value of i.
        self.i = i

        # An artificial hashcode is selected at random so that we don't
        # have any systematic relationship between comparison outcomes
        # (based on self.i and other.i) and relative position within the
        # hash vector (based on hashcode).
        self.hashcode = random.randrange(1000000000)

    def __hash__(self):
        return self.hashcode

    def __cmp__(self, other):
        maybe_mutate()   # The point of the test.
        return cmp(self.i, other.i)

    def __repr__(self):
        return "Horrid(%d)" % self.i

# Fill dict d with numentries (Horrid(i), Horrid(j)) key-value pairs,
# where i and j are selected at random from the candidates list.
# Return d.keys() after filling.

def fill_dict(d, candidates, numentries):
    d.clear()
    for i in xrange(numentries):
        d[Horrid(random.choice(candidates))] = \
            Horrid(random.choice(candidates))
    return d.keys()

# Test one pair of randomly generated dicts, each with n entries.
# Note that dict comparison is trivial if they don't have the same number
# of entires (then the "shorter" dict is instantly considered to be the
# smaller one, without even looking at the entries).

def test_one(n):
    global mutate, dict1, dict2, dict1keys, dict2keys

    # Fill the dicts without mutating them.
    mutate = 0
    dict1keys = fill_dict(dict1, range(n), n)
    dict2keys = fill_dict(dict2, range(n), n)

    # Enable mutation, then compare the dicts so long as they have the
    # same size.
    mutate = 1
    if verbose:
        print "trying w/ lengths", len(dict1), len(dict2),
    while dict1 and len(dict1) == len(dict2):
        if verbose:
            print ".",
        c = cmp(dict1, dict2)
    if verbose:
        print

# Run test_one n times.  At the start (before the bugs were fixed), 20
# consecutive runs of this test each blew up on or before the sixth time
# test_one was run.  So n doesn't have to be large to get an interesting
# test.
# OTOH, calling with large n is also interesting, to ensure that the fixed
# code doesn't hold on to refcounts *too* long (in which case memory would
# leak).

def test(n):
    for i in xrange(n):
        test_one(random.randrange(1, 100))

# See last comment block for clues about good values for n.
test(100)

##########################################################################
# Another segfault bug, distilled by Michael Hudson from a c.l.py post.

class Child:
    def __init__(self, parent):
        self.__dict__['parent'] = parent
    def __getattr__(self, attr):
        # Mutate the parent's __dict__ mid-lookup to stress tp_print.
        self.parent.a = 1
        self.parent.b = 1
        self.parent.c = 1
        self.parent.d = 1
        self.parent.e = 1
        self.parent.f = 1
        self.parent.g = 1
        self.parent.h = 1
        self.parent.i = 1
        return getattr(self.parent, attr)

class Parent:
    def __init__(self):
        self.a = Child(self)

# Hard to say what this will print!  May vary from time to time.  But
# we're specifically trying to test the tp_print slot here, and this is
# the clearest way to do it.  We print the result to a temp file so that
# the expected-output file doesn't need to change.
f = open(TESTFN, "w")
print >> f, Parent().__dict__
f.close()
os.unlink(TESTFN)

##########################################################################
# And another core-dumper from Michael Hudson.

dict = {}

# Force dict to malloc its table.
for i in range(1, 10):
    dict[i] = i

f = open(TESTFN, "w")

class Machiavelli:
    def __repr__(self):
        dict.clear()

        # Michael sez:  "doesn't crash without this.  don't know why."
        # Tim sez:  "luck of the draw; crashes with or without for me."
        print >> f

        return `"machiavelli"`

    def __hash__(self):
        return 0

# zyasoft - this currently crashes with CHM implementation of dict;
# need to figure out why this is the case, but it does looks quite
# evil, doesn't it?
#dict[Machiavelli()] = Machiavelli()

print >> f, str(dict)
f.close()
os.unlink(TESTFN)
del f, dict

##########################################################################
# And another core-dumper from Michael Hudson.

dict = {}

# let's force dict to malloc its table
for i in range(1, 10):
    dict[i] = i

class Machiavelli2:
    def __eq__(self, other):
        # Clear the dict *during* the equality check used by the lookup.
        dict.clear()
        return 1

    def __hash__(self):
        return 0

dict[Machiavelli2()] = Machiavelli2()

try:
    dict[Machiavelli2()]
except KeyError:
    pass

del dict

##########################################################################
# And another core-dumper from Michael Hudson.

dict = {}

# let's force dict to malloc its table
for i in range(1, 10):
    dict[i] = i

class Machiavelli3:
    def __init__(self, id):
        self.id = id

    def __eq__(self, other):
        # Clear the dict mid-lookup, but only for a matching id.
        if self.id == other.id:
            dict.clear()
            return 1
        else:
            return 0

    def __repr__(self):
        return "%s(%s)"%(self.__class__.__name__, self.id)

    def __hash__(self):
        return 0

dict[Machiavelli3(1)] = Machiavelli3(0)
dict[Machiavelli3(2)] = Machiavelli3(0)

f = open(TESTFN, "w")
try:
    try:
        print >> f, dict[Machiavelli3(2)]
    except KeyError:
        pass
finally:
    f.close()
    os.unlink(TESTFN)

del dict
apache-2.0
eviljeff/olympia
src/olympia/lib/es/utils.py
9
3021
"""Helpers for (re)indexing documents into Elasticsearch."""
import datetime
import os
from copy import deepcopy

from django.conf import settings
from django.core.management.base import CommandError

from elasticsearch import helpers

import olympia.core.logger
from olympia.amo import search as amo_search

from .models import Reindexing


log = olympia.core.logger.getLogger('z.es')


# shortcut functions
is_reindexing_amo = Reindexing.objects.is_reindexing_amo
flag_reindexing_amo = Reindexing.objects.flag_reindexing_amo
unflag_reindexing_amo = Reindexing.objects.unflag_reindexing_amo
get_indices = Reindexing.objects.get_indices


def index_objects(ids, model, extract_func, index=None, transforms=None,
                  objects=None):
    """Extract documents for `ids` of `model` and bulk-index them into ES.

    Args:
        ids: primary keys of the objects to index.
        model: the model class; supplies the default index and manager.
        extract_func: callable turning an instance into an ES document dict.
        index: index (alias) name; defaults to ``model._get_index()``.
        transforms: optional queryset ``.transform()`` callables to apply.
        objects: manager/queryset to fetch from; defaults to
            ``model.objects``.

    Returns:
        The result of ``elasticsearch.helpers.bulk``.
    """
    if index is None:
        index = model._get_index()
    if objects is None:
        objects = model.objects

    # During a reindex this can resolve to more than one concrete index
    # (old and new), so each document is written to all of them.
    indices = Reindexing.objects.get_indices(index)

    if transforms is None:
        transforms = []

    qs = objects.filter(id__in=ids)
    for t in transforms:
        qs = qs.transform(t)

    bulk = []
    for ob in qs:
        data = extract_func(ob)
        # FIX: the inner loop used to rebind the `index` parameter itself
        # (`for index in indices`), shadowing the argument; use a distinct
        # name. Behavior is unchanged.
        for index_name in indices:
            bulk.append({
                "_source": data,
                "_id": ob.id,
                "_type": ob.get_mapping_type(),
                "_index": index_name
            })

    es = amo_search.get_es()
    return helpers.bulk(es, bulk)


def raise_if_reindex_in_progress(site):
    """Checks if the database indexation flag is on for the given site.

    If it's on, and if no "FORCE_INDEXING" variable is present in the env,
    raises a CommandError.
    """
    already_reindexing = Reindexing.objects._is_reindexing(site)
    if already_reindexing and 'FORCE_INDEXING' not in os.environ:
        raise CommandError("Indexation already occurring. Add a "
                           "FORCE_INDEXING variable in the environ "
                           "to force it")


def timestamp_index(index):
    """Return index-YYYYMMDDHHMMSS with the current time."""
    return '%s-%s' % (index,
                      datetime.datetime.now().strftime('%Y%m%d%H%M%S'))


def create_index(index, config=None):
    """Create an index if it's not present.

    Return the index name.

    Options:

    - index: name of the index.
    - config: if provided, used when passing the configuration of the index
      to ES.
    """
    es = amo_search.get_es()

    if config is None:
        config = {}

    if 'settings' not in config:
        config['settings'] = {
            'index': {}
        }
    else:
        # Make a deepcopy of the settings in the config that was passed, so
        # that we can modify it freely to add shards and replicas settings.
        config['settings'] = deepcopy(config['settings'])

    config['settings']['index'].update({
        'number_of_shards': settings.ES_DEFAULT_NUM_SHARDS,
        'number_of_replicas': settings.ES_DEFAULT_NUM_REPLICAS,
        'max_result_window': settings.ES_MAX_RESULT_WINDOW,
    })

    if not es.indices.exists(index):
        es.indices.create(index, body=config)

    return index
bsd-3-clause
tylertian/Openstack
openstack F/cinder/cinder/volume/storwize_svc.py
2
55771
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2012 IBM, Inc. # Copyright (c) 2012 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Authors: # Ronen Kat <ronenkat@il.ibm.com> # Avishay Traeger <avishay@il.ibm.com> """ Volume driver for IBM Storwize V7000 and SVC storage systems. Notes: 1. If you specify both a password and a key file, this driver will use the key file only. 2. When using a key file for authentication, it is up to the user or system administrator to store the private key in a safe manner. 3. The defaults for creating volumes are "-rsize 2% -autoexpand -grainsize 256 -warning 0". These can be changed in the configuration file or by using volume types(recommended only for advanced users). Limitations: 1. The driver was not tested with SVC or clustered configurations of Storwize V7000. 2. The driver expects CLI output in English, error messages may be in a localized format. 
""" import random import re import string import time from cinder import exception from cinder import flags from cinder.openstack.common import cfg from cinder.openstack.common import excutils from cinder.openstack.common import log as logging from cinder.volume import san LOG = logging.getLogger(__name__) storwize_svc_opts = [ cfg.StrOpt('storwize_svc_volpool_name', default='volpool', help='Storage system storage pool for volumes'), cfg.StrOpt('storwize_svc_vol_rsize', default='2%', help='Storage system space-efficiency parameter for volumes'), cfg.StrOpt('storwize_svc_vol_warning', default='0', help='Storage system threshold for volume capacity warnings'), cfg.BoolOpt('storwize_svc_vol_autoexpand', default=True, help='Storage system autoexpand parameter for volumes ' '(True/False)'), cfg.StrOpt('storwize_svc_vol_grainsize', default='256', help='Storage system grain size parameter for volumes ' '(32/64/128/256)'), cfg.BoolOpt('storwize_svc_vol_compression', default=False, help='Storage system compression option for volumes'), cfg.BoolOpt('storwize_svc_vol_easytier', default=True, help='Enable Easy Tier for volumes'), cfg.StrOpt('storwize_svc_flashcopy_timeout', default='120', help='Maximum number of seconds to wait for FlashCopy to be' 'prepared. Maximum value is 600 seconds (10 minutes).'), ] FLAGS = flags.FLAGS FLAGS.register_opts(storwize_svc_opts) class StorwizeSVCDriver(san.SanISCSIDriver): """IBM Storwize V7000 and SVC iSCSI volume driver.""" def __init__(self, *args, **kwargs): super(StorwizeSVCDriver, self).__init__(*args, **kwargs) self.iscsi_ipv4_conf = None self.iscsi_ipv6_conf = None # Build cleanup transaltion tables for hosts names to follow valid # host names for Storwizew V7000 and SVC storage systems. 
invalid_ch_in_host = '' for num in range(0, 128): ch = chr(num) if ((not ch.isalnum()) and (ch != ' ') and (ch != '.') and (ch != '-') and (ch != '_')): invalid_ch_in_host = invalid_ch_in_host + ch self._string_host_name_filter = string.maketrans(invalid_ch_in_host, '-' * len(invalid_ch_in_host)) self._unicode_host_name_filter = dict((ord(unicode(char)), u'-') for char in invalid_ch_in_host) def _get_hdr_dic(self, header, row, delim): """Return CLI row data as a dictionary indexed by names from header. Create a dictionary object from the data row string using the header string. The strings are converted to columns using the delimiter in delim. """ attributes = header.split(delim) values = row.split(delim) self._driver_assert(len(values) == len(attributes), _('_get_hdr_dic: attribute headers and values do not match.\n ' 'Headers: %(header)s\n Values: %(row)s') % {'header': str(header), 'row': str(row)}) dic = {} for attribute, value in map(None, attributes, values): dic[attribute] = value return dic def _driver_assert(self, assert_condition, exception_message): """Internal assertion mechanism for CLI output.""" if not assert_condition: LOG.error(exception_message) raise exception.VolumeBackendAPIException(data=exception_message) def check_for_setup_error(self): """Check that we have all configuration details from the storage.""" LOG.debug(_('enter: check_for_setup_error')) # Validate that the pool exists ssh_cmd = 'lsmdiskgrp -delim ! -nohdr' out, err = self._run_ssh(ssh_cmd) self._driver_assert(len(out) > 0, _('check_for_setup_error: failed with unexpected CLI output.\n ' 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') % {'cmd': ssh_cmd, 'out': str(out), 'err': str(err)}) search_text = '!%s!' 
% getattr(FLAGS, 'storwize_svc_volpool_name') if search_text not in out: raise exception.InvalidInput( reason=(_('pool %s doesn\'t exist') % getattr(FLAGS, 'storwize_svc_volpool_name'))) storage_nodes = {} # Get the iSCSI names of the Storwize/SVC nodes ssh_cmd = 'svcinfo lsnode -delim !' out, err = self._run_ssh(ssh_cmd) self._driver_assert(len(out) > 0, _('check_for_setup_error: failed with unexpected CLI output.\n ' 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') % {'cmd': ssh_cmd, 'out': str(out), 'err': str(err)}) nodes = out.strip().split('\n') self._driver_assert(len(nodes) > 0, _('check_for_setup_error: failed with unexpected CLI output.\n ' 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') % {'cmd': ssh_cmd, 'out': str(out), 'err': str(err)}) header = nodes.pop(0) for node_line in nodes: try: node_data = self._get_hdr_dic(header, node_line, '!') except exception.VolumeBackendAPIException as e: with excutils.save_and_reraise_exception(): LOG.error(_('check_for_setup_error: ' 'failed with unexpected CLI output.\n ' 'Command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s\n') % {'cmd': ssh_cmd, 'out': str(out), 'err': str(err)}) node = {} try: node['id'] = node_data['id'] node['name'] = node_data['name'] node['iscsi_name'] = node_data['iscsi_name'] node['status'] = node_data['status'] node['ipv4'] = [] node['ipv6'] = [] if node['iscsi_name'] != '': storage_nodes[node['id']] = node except KeyError as e: LOG.error(_('Did not find expected column name in ' 'svcinfo lsnode: %s') % str(e)) exception_message = ( _('check_for_setup_error: Unexpected CLI output.\n ' 'Details: %(msg)s\n' 'Command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s') % {'msg': str(e), 'cmd': ssh_cmd, 'out': str(out), 'err': str(err)}) raise exception.VolumeBackendAPIException( data=exception_message) # Get the iSCSI IP addresses of the Storwize/SVC nodes ssh_cmd = 'lsportip -delim !' 
out, err = self._run_ssh(ssh_cmd) self._driver_assert(len(out) > 0, _('check_for_setup_error: failed with unexpected CLI output.\n ' 'Command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s') % {'cmd': ssh_cmd, 'out': str(out), 'err': str(err)}) portips = out.strip().split('\n') self._driver_assert(len(portips) > 0, _('check_for_setup_error: failed with unexpected CLI output.\n ' 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') % {'cmd': ssh_cmd, 'out': str(out), 'err': str(err)}) header = portips.pop(0) for portip_line in portips: try: port_data = self._get_hdr_dic(header, portip_line, '!') except exception.VolumeBackendAPIException as e: with excutils.save_and_reraise_exception(): LOG.error(_('check_for_setup_error: ' 'failed with unexpected CLI output.\n ' 'Command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s\n') % {'cmd': ssh_cmd, 'out': str(out), 'err': str(err)}) try: port_node_id = port_data['node_id'] port_ipv4 = port_data['IP_address'] port_ipv6 = port_data['IP_address_6'] except KeyError as e: LOG.error(_('Did not find expected column name in ' 'lsportip: %s') % str(e)) exception_message = ( _('check_for_setup_error: Unexpected CLI output.\n ' 'Details: %(msg)s\n' 'Command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s') % {'msg': str(e), 'cmd': ssh_cmd, 'out': str(out), 'err': str(err)}) raise exception.VolumeBackendAPIException( data=exception_message) if port_node_id in storage_nodes: node = storage_nodes[port_node_id] if len(port_ipv4) > 0: node['ipv4'].append(port_ipv4) if len(port_ipv6) > 0: node['ipv6'].append(port_ipv6) else: raise exception.VolumeBackendAPIException( data=_('check_for_setup_error: ' 'fail to storage configuration: unknown ' 'storage node %(node_id)s from CLI output.\n ' 'stdout: %(out)s\n stderr: %(err)s\n') % {'node_id': port_node_id, 'out': str(out), 'err': str(err)}) iscsi_ipv4_conf = [] iscsi_ipv6_conf = [] for node_key in storage_nodes: node = storage_nodes[node_key] if 'ipv4' in node and len(node['iscsi_name']) > 
0: iscsi_ipv4_conf.append({'iscsi_name': node['iscsi_name'], 'ip': node['ipv4'], 'node_id': node['id']}) if 'ipv6' in node and len(node['iscsi_name']) > 0: iscsi_ipv6_conf.append({'iscsi_name': node['iscsi_name'], 'ip': node['ipv6'], 'node_id': node['id']}) if (len(node['ipv4']) == 0) and (len(node['ipv6']) == 0): raise exception.VolumeBackendAPIException( data=_('check_for_setup_error: ' 'fail to storage configuration: storage ' 'node %s has no IP addresses configured') % node['id']) # Make sure we have at least one IPv4 address with a iSCSI name # TODO(ronenkat) need to expand this to support IPv6 self._driver_assert(len(iscsi_ipv4_conf) > 0, _('could not obtain IP address and iSCSI name from the storage. ' 'Please verify that the storage is configured for iSCSI.\n ' 'Storage nodes: %(nodes)s\n portips: %(portips)s') % {'nodes': nodes, 'portips': portips}) self.iscsi_ipv4_conf = iscsi_ipv4_conf self.iscsi_ipv6_conf = iscsi_ipv6_conf LOG.debug(_('leave: check_for_setup_error')) def _check_num_perc(self, value): """Return True if value is either a number or a percentage.""" if value.endswith('%'): value = value[0:-1] return value.isdigit() def _check_flags(self): """Ensure that the flags are set properly.""" required_flags = ['san_ip', 'san_ssh_port', 'san_login', 'storwize_svc_volpool_name'] for flag in required_flags: if not getattr(FLAGS, flag, None): raise exception.InvalidInput( reason=_('%s is not set') % flag) # Ensure that either password or keyfile were set if not (getattr(FLAGS, 'san_password', None) or getattr(FLAGS, 'san_private_key', None)): raise exception.InvalidInput( reason=_('Password or SSH private key is required for ' 'authentication: set either san_password or ' 'san_private_key option')) # Check that rsize is a number or percentage rsize = getattr(FLAGS, 'storwize_svc_vol_rsize') if not self._check_num_perc(rsize) and (rsize != '-1'): raise exception.InvalidInput( reason=_('Illegal value specified for storwize_svc_vol_rsize: ' 'set to either 
a number or a percentage')) # Check that warning is a number or percentage warning = getattr(FLAGS, 'storwize_svc_vol_warning') if not self._check_num_perc(warning): raise exception.InvalidInput( reason=_('Illegal value specified for ' 'storwize_svc_vol_warning: ' 'set to either a number or a percentage')) # Check that autoexpand is a boolean autoexpand = getattr(FLAGS, 'storwize_svc_vol_autoexpand') if type(autoexpand) != type(True): raise exception.InvalidInput( reason=_('Illegal value specified for ' 'storwize_svc_vol_autoexpand: set to either ' 'True or False')) # Check that grainsize is 32/64/128/256 grainsize = getattr(FLAGS, 'storwize_svc_vol_grainsize') if grainsize not in ['32', '64', '128', '256']: raise exception.InvalidInput( reason=_('Illegal value specified for ' 'storwize_svc_vol_grainsize: set to either ' '\'32\', \'64\', \'128\', or \'256\'')) # Check that flashcopy_timeout is numeric and 32/64/128/256 flashcopy_timeout = getattr(FLAGS, 'storwize_svc_flashcopy_timeout') if not (flashcopy_timeout.isdigit() and int(flashcopy_timeout) > 0 and int(flashcopy_timeout) <= 600): raise exception.InvalidInput( reason=_('Illegal value %s specified for ' 'storwize_svc_flashcopy_timeout: ' 'valid values are between 0 and 600') % flashcopy_timeout) # Check that compression is a boolean and that rsize is set volume_compression = getattr(FLAGS, 'storwize_svc_vol_compression') if type(volume_compression) != type(True): raise exception.InvalidInput( reason=_('Illegal value specified for ' 'storwize_svc_vol_compression: set to either ' 'True or False')) if ((volume_compression == True) and (getattr(FLAGS, 'storwize_svc_vol_rsize') == '-1')): raise exception.InvalidInput( reason=_('If compression is set to True, rsize must ' 'also be set (not equal to -1)')) # Check that easytier is a boolean volume_easytier = getattr(FLAGS, 'storwize_svc_vol_easytier') if type(volume_easytier) != type(True): raise exception.InvalidInput( reason=_('Illegal value specified for ' 
'storwize_svc_vol_easytier: set to either ' 'True or False')) def do_setup(self, context): """Validate the flags.""" LOG.debug(_('enter: do_setup')) self._check_flags() LOG.debug(_('leave: do_setup')) def create_volume(self, volume): """Create a new volume - uses the internal method.""" return self._create_volume(volume, units='gb') def _create_volume(self, volume, units='gb'): """Create a new volume.""" name = volume['name'] model_update = None LOG.debug(_('enter: create_volume: volume %s ') % name) size = int(volume['size']) if getattr(FLAGS, 'storwize_svc_vol_autoexpand') == True: autoex = '-autoexpand' else: autoex = '' if getattr(FLAGS, 'storwize_svc_vol_easytier') == True: easytier = '-easytier on' else: easytier = '-easytier off' # Set space-efficient options if getattr(FLAGS, 'storwize_svc_vol_rsize').strip() == '-1': ssh_cmd_se_opt = '' else: ssh_cmd_se_opt = ('-rsize %(rsize)s %(autoex)s -warning %(warn)s' % {'rsize': getattr(FLAGS, 'storwize_svc_vol_rsize'), 'autoex': autoex, 'warn': getattr(FLAGS, 'storwize_svc_vol_warning')}) if getattr(FLAGS, 'storwize_svc_vol_compression'): ssh_cmd_se_opt = ssh_cmd_se_opt + ' -compressed' else: ssh_cmd_se_opt = ssh_cmd_se_opt + (' -grainsize %(grain)s' % {'grain': getattr(FLAGS, 'storwize_svc_vol_grainsize')}) ssh_cmd = ('mkvdisk -name %(name)s -mdiskgrp %(mdiskgrp)s ' '-iogrp 0 -size %(size)s -unit ' '%(unit)s %(easytier)s %(ssh_cmd_se_opt)s' % {'name': name, 'mdiskgrp': getattr(FLAGS, 'storwize_svc_volpool_name'), 'size': size, 'unit': units, 'easytier': easytier, 'ssh_cmd_se_opt': ssh_cmd_se_opt}) out, err = self._run_ssh(ssh_cmd) self._driver_assert(len(out.strip()) > 0, _('create volume %(name)s - did not find ' 'success message in CLI output.\n ' 'stdout: %(out)s\n stderr: %(err)s') % {'name': name, 'out': str(out), 'err': str(err)}) # Ensure that the output is as expected match_obj = re.search('Virtual Disk, id \[([0-9]+)\], ' 'successfully created', out) # Make sure we got a "successfully created" message 
with vdisk id self._driver_assert(match_obj is not None, _('create volume %(name)s - did not find ' 'success message in CLI output.\n ' 'stdout: %(out)s\n stderr: %(err)s') % {'name': name, 'out': str(out), 'err': str(err)}) LOG.debug(_('leave: create_volume: volume %(name)s ') % {'name': name}) def delete_volume(self, volume): self._delete_volume(volume, False) def _delete_volume(self, volume, force_opt): """Driver entry point for destroying existing volumes.""" name = volume['name'] LOG.debug(_('enter: delete_volume: volume %(name)s ') % {'name': name}) if force_opt: force_flag = '-force' else: force_flag = '' volume_defined = self._is_volume_defined(name) # Try to delete volume only if found on the storage if volume_defined: out, err = self._run_ssh('rmvdisk %(force)s %(name)s' % {'force': force_flag, 'name': name}) # No output should be returned from rmvdisk self._driver_assert(len(out.strip()) == 0, _('delete volume %(name)s - non empty output from CLI.\n ' 'stdout: %(out)s\n stderr: %(err)s') % {'name': name, 'out': str(out), 'err': str(err)}) else: # Log that volume does not exist LOG.info(_('warning: tried to delete volume %(name)s but ' 'it does not exist.') % {'name': name}) LOG.debug(_('leave: delete_volume: volume %(name)s ') % {'name': name}) def ensure_export(self, context, volume): """Check that the volume exists on the storage. The system does not "export" volumes as a Linux iSCSI target does, and therefore we just check that the volume exists on the storage. """ volume_defined = self._is_volume_defined(volume['name']) if not volume_defined: LOG.error(_('ensure_export: volume %s not found on storage') % volume['name']) def create_export(self, context, volume): model_update = None return model_update def remove_export(self, context, volume): pass def check_for_export(self, context, volume_id): raise NotImplementedError() def initialize_connection(self, volume, connector): """Perform the necessary work so that an iSCSI connection can be made. 
To be able to create an iSCSI connection from a given iSCSI name to a volume, we must: 1. Translate the given iSCSI name to a host name 2. Create new host on the storage system if it does not yet exist 2. Map the volume to the host if it is not already done 3. Return iSCSI properties, including the IP address of the preferred node for this volume and the LUN number. """ LOG.debug(_('enter: initialize_connection: volume %(vol)s with ' 'connector %(conn)s') % {'vol': str(volume), 'conn': str(connector)}) initiator_name = connector['initiator'] volume_name = volume['name'] host_name = self._get_host_from_iscsiname(initiator_name) # Check if a host is defined for the iSCSI initiator name if host_name is None: # Host does not exist - add a new host to Storwize/SVC host_name = self._create_new_host('host%s' % initiator_name, initiator_name) # Verify that create_new_host succeeded self._driver_assert(host_name is not None, _('_create_new_host failed to return the host name.')) lun_id = self._map_vol_to_host(volume_name, host_name) # Get preferred path # Only IPv4 for now because lack of OpenStack support # TODO(ronenkat): Add support for IPv6 volume_attributes = self._get_volume_attributes(volume_name) if (volume_attributes is not None and 'preferred_node_id' in volume_attributes): preferred_node = volume_attributes['preferred_node_id'] preferred_node_entry = None for node in self.iscsi_ipv4_conf: if node['node_id'] == preferred_node: preferred_node_entry = node break if preferred_node_entry is None: preferred_node_entry = self.iscsi_ipv4_conf[0] LOG.error(_('initialize_connection: did not find preferred ' 'node %(node)s for volume %(vol)s in iSCSI ' 'configuration') % {'node': preferred_node, 'vol': volume_name}) else: # Get 1st node preferred_node_entry = self.iscsi_ipv4_conf[0] LOG.error( _('initialize_connection: did not find a preferred node ' 'for volume %s in iSCSI configuration') % volume_name) properties = {} # We didn't use iSCSI discover, as in server-based 
iSCSI properties['target_discovered'] = False # We take the first IP address for now. Ideally, OpenStack will # support multipath for improved performance. properties['target_portal'] = ('%s:%s' % (preferred_node_entry['ip'][0], '3260')) properties['target_iqn'] = preferred_node_entry['iscsi_name'] properties['target_lun'] = lun_id properties['volume_id'] = volume['id'] LOG.debug(_('leave: initialize_connection:\n volume: %(vol)s\n ' 'connector %(conn)s\n properties: %(prop)s') % {'vol': str(volume), 'conn': str(connector), 'prop': str(properties)}) return {'driver_volume_type': 'iscsi', 'data': properties, } def terminate_connection(self, volume, connector): """Cleanup after an iSCSI connection has been terminated. When we clean up a terminated connection between a given iSCSI name and volume, we: 1. Translate the given iSCSI name to a host name 2. Remove the volume-to-host mapping if it exists 3. Delete the host if it has no more mappings (hosts are created automatically by this driver when mappings are created) """ LOG.debug(_('enter: terminate_connection: volume %(vol)s with ' 'connector %(conn)s') % {'vol': str(volume), 'conn': str(connector)}) vol_name = volume['name'] initiator_name = connector['initiator'] host_name = self._get_host_from_iscsiname(initiator_name) # Verify that _get_host_from_iscsiname returned the host. # This should always succeed as we terminate an existing connection. 
self._driver_assert(host_name is not None, _('_get_host_from_iscsiname failed to return the host name ' 'for iscsi name %s') % initiator_name) # Check if vdisk-host mapping exists, remove if it does mapping_data = self._get_hostvdisk_mappings(host_name) if vol_name in mapping_data: out, err = self._run_ssh('rmvdiskhostmap -host %s %s' % (host_name, vol_name)) # Verify CLI behaviour - no output is returned from # rmvdiskhostmap self._driver_assert(len(out.strip()) == 0, _('delete mapping of volume %(vol)s to host %(host)s ' '- non empty output from CLI.\n ' 'stdout: %(out)s\n stderr: %(err)s') % {'vol': vol_name, 'host': host_name, 'out': str(out), 'err': str(err)}) del mapping_data[vol_name] else: LOG.error(_('terminate_connection: no mapping of volume ' '%(vol)s to host %(host)s found') % {'vol': vol_name, 'host': host_name}) # If this host has no more mappings, delete it if not mapping_data: self._delete_host(host_name) LOG.debug(_('leave: terminate_connection: volume %(vol)s with ' 'connector %(conn)s') % {'vol': str(volume), 'conn': str(connector)}) def _flashcopy_cleanup(self, fc_map_id, source, target): """Clean up a failed FlashCopy operation.""" try: out, err = self._run_ssh('stopfcmap -force %s' % fc_map_id) out, err = self._run_ssh('rmfcmap -force %s' % fc_map_id) except exception.ProcessExecutionError as e: LOG.error(_('_run_flashcopy: fail to cleanup failed FlashCopy ' 'mapping %(fc_map_id)% ' 'from %(source)s to %(target)s.\n' 'stdout: %(out)s\n stderr: %(err)s') % {'fc_map_id': fc_map_id, 'source': source, 'target': target, 'out': e.stdout, 'err': e.stderr}) def _run_flashcopy(self, source, target): """Create a FlashCopy mapping from the source to the target.""" LOG.debug( _('enter: _run_flashcopy: execute FlashCopy from source ' '%(source)s to target %(target)s') % {'source': source, 'target': target}) fc_map_cli_cmd = ('mkfcmap -source %s -target %s -autodelete ' '-cleanrate 0' % (source, target)) out, err = self._run_ssh(fc_map_cli_cmd) 
self._driver_assert(len(out.strip()) > 0, _('create FC mapping from %(source)s to %(target)s - ' 'did not find success message in CLI output.\n' ' stdout: %(out)s\n stderr: %(err)s\n') % {'source': source, 'target': target, 'out': str(out), 'err': str(err)}) # Ensure that the output is as expected match_obj = re.search('FlashCopy Mapping, id \[([0-9]+)\], ' 'successfully created', out) # Make sure we got a "successfully created" message with vdisk id self._driver_assert(match_obj is not None, _('create FC mapping from %(source)s to %(target)s - ' 'did not find success message in CLI output.\n' ' stdout: %(out)s\n stderr: %(err)s\n') % {'source': source, 'target': target, 'out': str(out), 'err': str(err)}) try: fc_map_id = match_obj.group(1) self._driver_assert(fc_map_id is not None, _('create FC mapping from %(source)s to %(target)s - ' 'did not find mapping id in CLI output.\n' ' stdout: %(out)s\n stderr: %(err)s\n') % {'source': source, 'target': target, 'out': str(out), 'err': str(err)}) except IndexError: self._driver_assert(False, _('create FC mapping from %(source)s to %(target)s - ' 'did not find mapping id in CLI output.\n' ' stdout: %(out)s\n stderr: %(err)s\n') % {'source': source, 'target': target, 'out': str(out), 'err': str(err)}) try: out, err = self._run_ssh('prestartfcmap %s' % fc_map_id) except exception.ProcessExecutionError as e: with excutils.save_and_reraise_exception(): LOG.error(_('_run_flashcopy: fail to prepare FlashCopy ' 'from %(source)s to %(target)s.\n' 'stdout: %(out)s\n stderr: %(err)s') % {'source': source, 'target': target, 'out': e.stdout, 'err': e.stderr}) self._flashcopy_cleanup(fc_map_id, source, target) mapping_ready = False wait_time = 5 # Allow waiting of up to timeout (set as parameter) max_retries = (int(getattr(FLAGS, 'storwize_svc_flashcopy_timeout')) / wait_time) + 1 for try_number in range(1, max_retries): mapping_attributes = self._get_flashcopy_mapping_attributes( fc_map_id) if (mapping_attributes is None or 'status' 
not in mapping_attributes): break if mapping_attributes['status'] == 'prepared': mapping_ready = True break elif mapping_attributes['status'] != 'preparing': # Unexpected mapping status exception_msg = (_('unexecpted mapping status %(status)s ' 'for mapping %(id)s. Attributes: ' '%(attr)s') % {'status': mapping_attributes['status'], 'id': fc_map_id, 'attr': mapping_attributes}) raise exception.VolumeBackendAPIException( data=exception_msg) # Need to wait for mapping to be prepared, wait a few seconds time.sleep(wait_time) if not mapping_ready: exception_msg = (_('mapping %(id)s prepare failed to complete ' 'within the alloted %(to)s seconds timeout. ' 'Terminating') % {'id': fc_map_id, 'to': getattr( FLAGS, 'storwize_svc_flashcopy_timeout')}) LOG.error(_('_run_flashcopy: fail to start FlashCopy ' 'from %(source)s to %(target)s with ' 'exception %(ex)s') % {'source': source, 'target': target, 'ex': exception_msg}) self._flashcopy_cleanup(fc_map_id, source, target) raise exception.InvalidSnapshot( reason=_('_run_flashcopy: %s') % exception_msg) try: out, err = self._run_ssh('startfcmap %s' % fc_map_id) except exception.ProcessExecutionError as e: with excutils.save_and_reraise_exception(): LOG.error(_('_run_flashcopy: fail to start FlashCopy ' 'from %(source)s to %(target)s.\n' 'stdout: %(out)s\n stderr: %(err)s') % {'source': source, 'target': target, 'out': e.stdout, 'err': e.stderr}) self._flashcopy_cleanup(fc_map_id, source, target) LOG.debug(_('leave: _run_flashcopy: FlashCopy started from ' '%(source)s to %(target)s') % {'source': source, 'target': target}) def create_volume_from_snapshot(self, volume, snapshot): """Create a new snapshot from volume.""" source_volume = snapshot['name'] tgt_volume = volume['name'] LOG.debug(_('enter: create_volume_from_snapshot: snapshot %(tgt)s ' 'from volume %(src)s') % {'tgt': tgt_volume, 'src': source_volume}) src_volume_attributes = self._get_volume_attributes(source_volume) if src_volume_attributes is None: exception_msg = 
(_('create_volume_from_snapshot: source volume %s ' 'does not exist') % source_volume) LOG.error(exception_msg) raise exception.SnapshotNotFound(exception_msg, volume_id=source_volume) self._driver_assert('capacity' in src_volume_attributes, _('create_volume_from_snapshot: cannot get source ' 'volume %(src)s capacity from volume attributes ' '%(attr)s') % {'src': source_volume, 'attr': src_volume_attributes}) src_volume_size = src_volume_attributes['capacity'] tgt_volume_attributes = self._get_volume_attributes(tgt_volume) # Does the snapshot target exist? if tgt_volume_attributes is not None: exception_msg = (_('create_volume_from_snapshot: target volume %s ' 'already exists, cannot create') % tgt_volume) LOG.error(exception_msg) raise exception.InvalidSnapshot(reason=exception_msg) snapshot_volume = {} snapshot_volume['name'] = tgt_volume snapshot_volume['size'] = src_volume_size self._create_volume(snapshot_volume, units='b') try: self._run_flashcopy(source_volume, tgt_volume) except Exception: with excutils.save_and_reraise_exception(): # Clean up newly-created snapshot if the FlashCopy failed self._delete_volume(snapshot_volume, True) LOG.debug( _('leave: create_volume_from_snapshot: %s created successfully') % tgt_volume) def create_snapshot(self, snapshot): """Create a new snapshot using FlashCopy.""" src_volume = snapshot['volume_name'] tgt_volume = snapshot['name'] # Flag to keep track of created volumes in case FlashCopy tgt_volume_created = False LOG.debug(_('enter: create_snapshot: snapshot %(tgt)s from ' 'volume %(src)s') % {'tgt': tgt_volume, 'src': src_volume}) src_volume_attributes = self._get_volume_attributes(src_volume) if src_volume_attributes is None: exception_msg = ( _('create_snapshot: source volume %s does not exist') % src_volume) LOG.error(exception_msg) raise exception.VolumeNotFound(exception_msg, volume_id=src_volume) self._driver_assert('capacity' in src_volume_attributes, _('create_volume_from_snapshot: cannot get source ' 'volume 
%(src)s capacity from volume attributes ' '%(attr)s') % {'src': src_volume, 'attr': src_volume_attributes}) source_volume_size = src_volume_attributes['capacity'] tgt_volume_attributes = self._get_volume_attributes(tgt_volume) # Does the snapshot target exist? snapshot_volume = {} if tgt_volume_attributes is None: # No, create a new snapshot volume snapshot_volume['name'] = tgt_volume snapshot_volume['size'] = source_volume_size self._create_volume(snapshot_volume, units='b') tgt_volume_created = True else: # Yes, target exists, verify exact same size as source self._driver_assert('capacity' in tgt_volume_attributes, _('create_volume_from_snapshot: cannot get source ' 'volume %(src)s capacity from volume attributes ' '%(attr)s') % {'src': tgt_volume, 'attr': tgt_volume_attributes}) target_volume_size = tgt_volume_attributes['capacity'] if target_volume_size != source_volume_size: exception_msg = ( _('create_snapshot: source %(src)s and target ' 'volume %(tgt)s have different capacities ' '(source:%(ssize)s target:%(tsize)s)') % {'src': src_volume, 'tgt': tgt_volume, 'ssize': source_volume_size, 'tsize': target_volume_size}) LOG.error(exception_msg) raise exception.InvalidSnapshot(reason=exception_msg) try: self._run_flashcopy(src_volume, tgt_volume) except exception.InvalidSnapshot: with excutils.save_and_reraise_exception(): # Clean up newly-created snapshot if the FlashCopy failed if tgt_volume_created: self._delete_volume(snapshot_volume, True) LOG.debug(_('leave: create_snapshot: %s created successfully') % tgt_volume) def delete_snapshot(self, snapshot): self._delete_snapshot(snapshot, False) def _delete_snapshot(self, snapshot, force_opt): """Delete a snapshot from the storage.""" LOG.debug(_('enter: delete_snapshot: snapshot %s') % snapshot) snapshot_defined = self._is_volume_defined(snapshot['name']) if snapshot_defined: if force_opt: self._delete_volume(snapshot, force_opt) else: self.delete_volume(snapshot) LOG.debug(_('leave: delete_snapshot: snapshot 
%s') % snapshot) def _get_host_from_iscsiname(self, iscsi_name): """List the hosts defined in the storage. Return the host name with the given iSCSI name, or None if there is no host name with that iSCSI name. """ LOG.debug(_('enter: _get_host_from_iscsiname: iSCSI initiator %s') % iscsi_name) # Get list of host in the storage ssh_cmd = 'lshost -delim !' out, err = self._run_ssh(ssh_cmd) if (len(out.strip()) == 0): return None err_msg = _('_get_host_from_iscsiname: ' 'failed with unexpected CLI output.\n' ' command: %(cmd)s\n stdout: %(out)s\n ' 'stderr: %(err)s') % {'cmd': ssh_cmd, 'out': str(out), 'err': str(err)} host_lines = out.strip().split('\n') self._driver_assert(len(host_lines) > 0, err_msg) header = host_lines.pop(0).split('!') self._driver_assert('name' in header, err_msg) name_index = header.index('name') hosts = map(lambda x: x.split('!')[name_index], host_lines) hostname = None # For each host, get its details and check for its iSCSI name for host in hosts: ssh_cmd = 'lshost -delim ! %s' % host out, err = self._run_ssh(ssh_cmd) self._driver_assert(len(out) > 0, _('_get_host_from_iscsiname: ' 'Unexpected response from CLI output. ' 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') % {'cmd': ssh_cmd, 'out': str(out), 'err': str(err)}) for attrib_line in out.split('\n'): # If '!' not found, return the string and two empty strings attrib_name, foo, attrib_value = attrib_line.partition('!') if attrib_name == 'iscsi_name': if iscsi_name == attrib_value: hostname = host break if hostname is not None: break LOG.debug(_('leave: _get_host_from_iscsiname: iSCSI initiator %s') % iscsi_name) return hostname def _create_new_host(self, host_name, initiator_name): """Create a new host on the storage system. We modify the given host name, replace any invalid characters and adding a random suffix to avoid conflicts due to the translation. The host is associated with the given iSCSI initiator name. 
""" LOG.debug(_('enter: _create_new_host: host %(name)s with iSCSI ' 'initiator %(init)s') % {'name': host_name, 'init': initiator_name}) if isinstance(host_name, unicode): host_name = host_name.translate(self._unicode_host_name_filter) elif isinstance(host_name, str): host_name = host_name.translate(self._string_host_name_filter) else: msg = _('_create_new_host: cannot clean host name. Host name ' 'is not unicode or string') LOG.error(msg) raise exception.NoValidHost(reason=msg) # Add 5 digit random suffix to the host name to avoid # conflicts in host names after removing invalid characters # for Storwize/SVC names host_name = '%s_%s' % (host_name, random.randint(10000, 99999)) out, err = self._run_ssh('mkhost -name "%s" -iscsiname "%s"' % (host_name, initiator_name)) self._driver_assert(len(out.strip()) > 0 and 'successfully created' in out, _('create host %(name)s with iSCSI initiator %(init)s - ' 'did not find success message in CLI output.\n ' 'stdout: %(out)s\n stderr: %(err)s\n') % {'name': host_name, 'init': initiator_name, 'out': str(out), 'err': str(err)}) LOG.debug(_('leave: _create_new_host: host %(host)s with iSCSI ' 'initiator %(init)s') % {'host': host_name, 'init': initiator_name}) return host_name def _delete_host(self, host_name): """Delete a host and associated iSCSI initiator name.""" LOG.debug(_('enter: _delete_host: host %s ') % host_name) # Check if host exists on system, expect to find the host is_defined = self._is_host_defined(host_name) if is_defined: # Delete host out, err = self._run_ssh('rmhost %s ' % host_name) else: LOG.info(_('warning: tried to delete host %(name)s but ' 'it does not exist.') % {'name': host_name}) LOG.debug(_('leave: _delete_host: host %s ') % host_name) def _is_volume_defined(self, volume_name): """Check if volume is defined.""" LOG.debug(_('enter: _is_volume_defined: volume %s ') % volume_name) volume_attributes = self._get_volume_attributes(volume_name) LOG.debug(_('leave: _is_volume_defined: volume %(vol)s with 
%(str)s ') % {'vol': volume_name, 'str': volume_attributes is not None}) if volume_attributes is None: return False else: return True def _is_host_defined(self, host_name): """Check if a host is defined on the storage.""" LOG.debug(_('enter: _is_host_defined: host %s ') % host_name) # Get list of hosts with the name %host_name% # We expect zero or one line if host does not exist, # two lines if it does exist, otherwise error out, err = self._run_ssh('lshost -filtervalue name=%s -delim !' % host_name) if len(out.strip()) == 0: return False lines = out.strip().split('\n') self._driver_assert(len(lines) <= 2, _('_is_host_defined: Unexpected response from CLI output.\n ' 'stdout: %(out)s\n stderr: %(err)s\n') % {'out': str(out), 'err': str(err)}) if len(lines) == 2: host_info = self._get_hdr_dic(lines[0], lines[1], '!') host_name_from_storage = host_info['name'] # Make sure we got the data for the right host self._driver_assert(host_name_from_storage == host_name, _('Data received for host %(host1)s instead of host ' '%(host2)s.\n ' 'stdout: %(out)s\n stderr: %(err)s\n') % {'host1': host_name_from_storage, 'host2': host_name, 'out': str(out), 'err': str(err)}) else: # 0 or 1 lines host_name_from_storage = None LOG.debug(_('leave: _is_host_defined: host %(host)s with %(str)s ') % { 'host': host_name, 'str': host_name_from_storage is not None}) if host_name_from_storage is None: return False else: return True def _get_hostvdisk_mappings(self, host_name): """Return the defined storage mappings for a host.""" return_data = {} ssh_cmd = 'lshostvdiskmap -delim ! 
%s' % host_name out, err = self._run_ssh(ssh_cmd) mappings = out.strip().split('\n') if len(mappings) > 0: header = mappings.pop(0) for mapping_line in mappings: mapping_data = self._get_hdr_dic(header, mapping_line, '!') return_data[mapping_data['vdisk_name']] = mapping_data return return_data def _map_vol_to_host(self, volume_name, host_name): """Create a mapping between a volume to a host.""" LOG.debug(_('enter: _map_vol_to_host: volume %(vol)s to ' 'host %(host)s') % {'vol': volume_name, 'host': host_name}) # Check if this volume is already mapped to this host mapping_data = self._get_hostvdisk_mappings(host_name) mapped_flag = False result_lun = '-1' if volume_name in mapping_data: mapped_flag = True result_lun = mapping_data[volume_name]['SCSI_id'] else: lun_used = [] for k, v in mapping_data.iteritems(): lun_used.append(int(v['SCSI_id'])) lun_used.sort() # Assume all luns are taken to this point, and then try to find # an unused one result_lun = str(len(lun_used)) for index, n in enumerate(lun_used): if n > index: result_lun = str(index) # Volume is not mapped to host, create a new LUN if not mapped_flag: out, err = self._run_ssh('mkvdiskhostmap -host %s -scsi %s %s' % (host_name, result_lun, volume_name)) self._driver_assert(len(out.strip()) > 0 and 'successfully created' in out, _('_map_vol_to_host: mapping host %(host)s to ' 'volume %(vol)s with LUN ' '%(lun)s - did not find success message in CLI output. ' 'stdout: %(out)s\n stderr: %(err)s\n') % {'host': host_name, 'vol': volume_name, 'lun': result_lun, 'out': str(out), 'err': str(err)}) LOG.debug(_('leave: _map_vol_to_host: LUN %(lun)s, volume %(vol)s, ' 'host %(host)s') % {'lun': result_lun, 'vol': volume_name, 'host': host_name}) return result_lun def _get_flashcopy_mapping_attributes(self, fc_map_id): """Return the attributes of a FlashCopy mapping. Returns the attributes for the specified FlashCopy mapping, or None if the mapping does not exist. 
An exception is raised if the information from system can not be parsed or matched to a single FlashCopy mapping (this case should not happen under normal conditions). """ LOG.debug(_('enter: _get_flashcopy_mapping_attributes: mapping %s') % fc_map_id) # Get the lunid to be used fc_ls_map_cmd = ('lsfcmap -filtervalue id=%s -delim !' % fc_map_id) out, err = self._run_ssh(fc_ls_map_cmd) self._driver_assert(len(out) > 0, _('_get_flashcopy_mapping_attributes: ' 'Unexpected response from CLI output. ' 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') % {'cmd': fc_ls_map_cmd, 'out': str(out), 'err': str(err)}) # Get list of FlashCopy mappings # We expect zero or one line if mapping does not exist, # two lines if it does exist, otherwise error lines = out.strip().split('\n') self._driver_assert(len(lines) <= 2, _('_get_flashcopy_mapping_attributes: ' 'Unexpected response from CLI output. ' 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') % {'cmd': fc_ls_map_cmd, 'out': str(out), 'err': str(err)}) if len(lines) == 2: attributes = self._get_hdr_dic(lines[0], lines[1], '!') else: # 0 or 1 lines attributes = None LOG.debug(_('leave: _get_flashcopy_mapping_attributes: mapping ' '%(id)s, attributes %(attr)s') % {'id': fc_map_id, 'attr': attributes}) return attributes def _get_volume_attributes(self, volume_name): """Return volume attributes, or None if volume does not exist Exception is raised if the information from system can not be parsed/matched to a single volume. """ LOG.debug(_('enter: _get_volume_attributes: volume %s') % volume_name) # Get the lunid to be used try: ssh_cmd = 'lsvdisk -bytes -delim ! 
%s ' % volume_name out, err = self._run_ssh(ssh_cmd) except exception.ProcessExecutionError as e: # Didn't get details from the storage, return None LOG.error(_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s') % {'cmd': ssh_cmd, 'out': e.stdout, 'err': e.stderr}) return None self._driver_assert(len(out) > 0, ('_get_volume_attributes: ' 'Unexpected response from CLI output. ' 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') % {'cmd': ssh_cmd, 'out': str(out), 'err': str(err)}) attributes = {} for attrib_line in out.split('\n'): # If '!' not found, return the string and two empty strings attrib_name, foo, attrib_value = attrib_line.partition('!') if attrib_name is not None and attrib_name.strip() > 0: attributes[attrib_name] = attrib_value LOG.debug(_('leave: _get_volume_attributes:\n volume %(vol)s\n ' 'attributes: %(attr)s') % {'vol': volume_name, 'attr': str(attributes)}) return attributes
apache-2.0
linglaiyao1314/elasticsearch
dev-tools/prepare_release_update_documentation.py
269
5009
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

# Prepare a release: Update the documentation and commit
#
# USAGE:
#
#    python3 ./dev-tools/prepare_release_update_documentation.py
#
# Note: Ensure the script is run from the root directory.
#       This script needs to be run and then pushed, before proceeding
#       with prepare_release_create-release-version.py on your build VM.

import fnmatch
import subprocess
import tempfile
import re
import os
import shutil


def run(command):
    """Run *command* through the shell; raise RuntimeError on a non-zero exit."""
    if os.system('%s' % (command)):
        raise RuntimeError('    FAILED: %s' % (command))


def ensure_checkout_is_clean():
    """Abort unless the current git checkout has no local modifications,
    no untracked files, and no unpulled or unpushed commits."""
    # Make sure no local mods:
    s = subprocess.check_output('git diff --shortstat', shell=True)
    if len(s) > 0:
        raise RuntimeError('git diff --shortstat is non-empty: got:\n%s' % s)
    # Make sure no untracked files:
    s = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace')
    if 'Untracked files:' in s:
        raise RuntimeError('git status shows untracked files: got:\n%s' % s)
    # Make sure we have all changes from origin:
    if 'is behind' in s:
        raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin" in this branch: got:\n%s' % (s))
    # Make sure we have no local unpushed changes (this is supposed to be a clean area):
    if 'is ahead' in s:
        raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout ", "git reset --hard origin/" in this branch: got:\n%s' % (s))


def process_file(file_path, line_callback):
    """Apply *line_callback* to every line of *file_path*.

    The callback receives each line and returns the (possibly modified)
    replacement line.  If any line changed, the original file is replaced
    with the modified content and True is returned; otherwise the file is
    left untouched and False is returned.
    """
    fh, abs_path = tempfile.mkstemp()
    modified = False
    with open(abs_path, 'w', encoding='utf-8') as new_file:
        with open(file_path, encoding='utf-8') as old_file:
            for line in old_file:
                new_line = line_callback(line)
                modified = modified or (new_line != line)
                new_file.write(new_line)
    os.close(fh)
    if modified:
        # Remove original file
        os.remove(file_path)
        # Move new file into place
        shutil.move(abs_path, file_path)
        return True
    else:
        # nothing to do - just remove the tmp file
        os.remove(abs_path)
        return False


def find_release_version():
    """Return the next release version (e.g. '0.90.7') from pom.xml.

    Raises RuntimeError if pom.xml carries no -SNAPSHOT version, i.e. if
    the version has already been switched to a release version.
    """
    with open('pom.xml', encoding='utf-8') as file:
        for line in file:
            match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
            if match:
                return match.group(1)
    raise RuntimeError('Could not find release version in branch')


def add_pending_files(*files):
    """Stage the given files for the next git commit."""
    for file in files:
        if file:
            # print("Adding file: %s" % (file))
            run('git add %s' % (file))


def commit_feature_flags(release):
    """Commit the previously staged documentation feature-flag updates."""
    run('git commit -m "Update Documentation Feature Flags [%s]"' % release)


def update_reference_docs(release_version, path='docs'):
    """Walk *path* (defaults to 'docs') and replace all 'coming[$version]'
    tags with 'added[$version]' in *.asciidoc files.

    Returns the list of files that were modified (relative paths).
    """
    pattern = 'coming[%s' % (release_version)
    replacement = 'added[%s' % (release_version)
    pending_files = []

    def callback(line):
        return line.replace(pattern, replacement)

    for root, _, file_names in os.walk(path):
        for file_name in fnmatch.filter(file_names, '*.asciidoc'):
            full_path = os.path.join(root, file_name)
            if process_file(full_path, callback):
                pending_files.append(os.path.join(root, file_name))
    return pending_files


if __name__ == "__main__":
    release_version = find_release_version()
    print('*** Preparing release version documentation: [%s]' % release_version)
    ensure_checkout_is_clean()
    pending_files = update_reference_docs(release_version)
    if pending_files:
        add_pending_files(*pending_files)  # expects var args, use * to expand
        commit_feature_flags(release_version)
    else:
        print('WARNING: no documentation references updates for release %s' % (release_version))
    print('*** Done.')
apache-2.0
nicklhy/mxnet
example/image-classification/symbols/inception-v4.py
57
8706
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# -*- coding:utf-8 -*-
__author__ = 'zhangshuai'
modified_date = '16/7/5'
__modify__ = 'anchengwu'
modified_date = '17/2/22'

'''
Inception v4, suitable for images around 299 x 299.

Reference:
    Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning
    Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke
    arXiv.1602.07261
'''
import mxnet as mx
import numpy as np


def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None, suffix=''):
    """Conv -> BatchNorm -> ReLU building block (bias-free convolution)."""
    conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, no_bias=True, name='%s%s_conv2d' % (name, suffix))
    bn = mx.sym.BatchNorm(data=conv, name='%s%s_batchnorm' % (name, suffix), fix_gamma=True)
    act = mx.sym.Activation(data=bn, act_type='relu', name='%s%s_relu' % (name, suffix))
    return act


def Inception_stem(data, name=None):
    """Inception-v4 stem: initial downsampling convolutions and the two
    parallel-branch concat stages preceding the Inception-A blocks."""
    c = Conv(data, 32, kernel=(3, 3), stride=(2, 2), name='%s_conv1_3*3' % name)
    c = Conv(c, 32, kernel=(3, 3), name='%s_conv2_3*3' % name)
    c = Conv(c, 64, kernel=(3, 3), pad=(1, 1), name='%s_conv3_3*3' % name)

    # Branch split 1: max-pool vs strided conv, concatenated on channels.
    p1 = mx.sym.Pooling(c, kernel=(3, 3), stride=(2, 2), pool_type='max', name='%s_maxpool_1' % name)
    c2 = Conv(c, 96, kernel=(3, 3), stride=(2, 2), name='%s_conv4_3*3' % name)
    concat = mx.sym.Concat(*[p1, c2], name='%s_concat_1' % name)

    # Branch split 2: short 1x1->3x3 path vs factorized 7x1/1x7 path.
    c1 = Conv(concat, 64, kernel=(1, 1), pad=(0, 0), name='%s_conv5_1*1' % name)
    c1 = Conv(c1, 96, kernel=(3, 3), name='%s_conv6_3*3' % name)

    c2 = Conv(concat, 64, kernel=(1, 1), pad=(0, 0), name='%s_conv7_1*1' % name)
    c2 = Conv(c2, 64, kernel=(7, 1), pad=(3, 0), name='%s_conv8_7*1' % name)
    c2 = Conv(c2, 64, kernel=(1, 7), pad=(0, 3), name='%s_conv9_1*7' % name)
    c2 = Conv(c2, 96, kernel=(3, 3), pad=(0, 0), name='%s_conv10_3*3' % name)

    concat = mx.sym.Concat(*[c1, c2], name='%s_concat_2' % name)

    # Branch split 3: strided conv vs max-pool.
    c1 = Conv(concat, 192, kernel=(3, 3), stride=(2, 2), name='%s_conv11_3*3' % name)
    p1 = mx.sym.Pooling(concat, kernel=(3, 3), stride=(2, 2), pool_type='max', name='%s_maxpool_2' % name)

    concat = mx.sym.Concat(*[c1, p1], name='%s_concat_3' % name)
    return concat


def InceptionA(input, name=None):
    """Inception-A block: avg-pool+1x1, 1x1, 1x1->3x3 and 1x1->3x3->3x3 branches."""
    p1 = mx.sym.Pooling(input, kernel=(3, 3), pad=(1, 1), pool_type='avg', name='%s_avgpool_1' % name)
    c1 = Conv(p1, 96, kernel=(1, 1), pad=(0, 0), name='%s_conv1_1*1' % name)

    c2 = Conv(input, 96, kernel=(1, 1), pad=(0, 0), name='%s_conv2_1*1' % name)

    c3 = Conv(input, 64, kernel=(1, 1), pad=(0, 0), name='%s_conv3_1*1' % name)
    c3 = Conv(c3, 96, kernel=(3, 3), pad=(1, 1), name='%s_conv4_3*3' % name)

    c4 = Conv(input, 64, kernel=(1, 1), pad=(0, 0), name='%s_conv5_1*1' % name)
    c4 = Conv(c4, 96, kernel=(3, 3), pad=(1, 1), name='%s_conv6_3*3' % name)
    c4 = Conv(c4, 96, kernel=(3, 3), pad=(1, 1), name='%s_conv7_3*3' % name)

    concat = mx.sym.Concat(*[c1, c2, c3, c4], name='%s_concat_1' % name)
    return concat


def ReductionA(input, name=None):
    """Reduction-A block: halves spatial resolution via pool / strided convs."""
    p1 = mx.sym.Pooling(input, kernel=(3, 3), stride=(2, 2), pool_type='max', name='%s_maxpool_1' % name)

    c2 = Conv(input, 384, kernel=(3, 3), stride=(2, 2), name='%s_conv1_3*3' % name)

    c3 = Conv(input, 192, kernel=(1, 1), pad=(0, 0), name='%s_conv2_1*1' % name)
    c3 = Conv(c3, 224, kernel=(3, 3), pad=(1, 1), name='%s_conv3_3*3' % name)
    c3 = Conv(c3, 256, kernel=(3, 3), stride=(2, 2), pad=(0, 0), name='%s_conv4_3*3' % name)

    concat = mx.sym.Concat(*[p1, c2, c3], name='%s_concat_1' % name)
    return concat


def InceptionB(input, name=None):
    """Inception-B block with factorized 1x7 / 7x1 convolutions."""
    p1 = mx.sym.Pooling(input, kernel=(3, 3), pad=(1, 1), pool_type='avg', name='%s_avgpool_1' % name)
    c1 = Conv(p1, 128, kernel=(1, 1), pad=(0, 0), name='%s_conv1_1*1' % name)

    c2 = Conv(input, 384, kernel=(1, 1), pad=(0, 0), name='%s_conv2_1*1' % name)

    c3 = Conv(input, 192, kernel=(1, 1), pad=(0, 0), name='%s_conv3_1*1' % name)
    c3 = Conv(c3, 224, kernel=(1, 7), pad=(0, 3), name='%s_conv4_1*7' % name)
    # paper wrong
    c3 = Conv(c3, 256, kernel=(7, 1), pad=(3, 0), name='%s_conv5_1*7' % name)

    c4 = Conv(input, 192, kernel=(1, 1), pad=(0, 0), name='%s_conv6_1*1' % name)
    c4 = Conv(c4, 192, kernel=(1, 7), pad=(0, 3), name='%s_conv7_1*7' % name)
    c4 = Conv(c4, 224, kernel=(7, 1), pad=(3, 0), name='%s_conv8_7*1' % name)
    c4 = Conv(c4, 224, kernel=(1, 7), pad=(0, 3), name='%s_conv9_1*7' % name)
    c4 = Conv(c4, 256, kernel=(7, 1), pad=(3, 0), name='%s_conv10_7*1' % name)

    concat = mx.sym.Concat(*[c1, c2, c3, c4], name='%s_concat_1' % name)
    return concat


def ReductionB(input, name=None):
    """Reduction-B block: second spatial-resolution halving stage."""
    p1 = mx.sym.Pooling(input, kernel=(3, 3), stride=(2, 2), pool_type='max', name='%s_maxpool_1' % name)

    c2 = Conv(input, 192, kernel=(1, 1), pad=(0, 0), name='%s_conv1_1*1' % name)
    c2 = Conv(c2, 192, kernel=(3, 3), stride=(2, 2), name='%s_conv2_3*3' % name)

    c3 = Conv(input, 256, kernel=(1, 1), pad=(0, 0), name='%s_conv3_1*1' % name)
    c3 = Conv(c3, 256, kernel=(1, 7), pad=(0, 3), name='%s_conv4_1*7' % name)
    c3 = Conv(c3, 320, kernel=(7, 1), pad=(3, 0), name='%s_conv5_7*1' % name)
    c3 = Conv(c3, 320, kernel=(3, 3), stride=(2, 2), name='%s_conv6_3*3' % name)

    concat = mx.sym.Concat(*[p1, c2, c3], name='%s_concat_1' % name)
    return concat


def InceptionC(input, name=None):
    """Inception-C block: branches fan out into parallel 1x3 / 3x1 convolutions."""
    p1 = mx.sym.Pooling(input, kernel=(3, 3), pad=(1, 1), pool_type='avg', name='%s_avgpool_1' % name)
    c1 = Conv(p1, 256, kernel=(1, 1), pad=(0, 0), name='%s_conv1_1*1' % name)

    c2 = Conv(input, 256, kernel=(1, 1), pad=(0, 0), name='%s_conv2_1*1' % name)

    c3 = Conv(input, 384, kernel=(1, 1), pad=(0, 0), name='%s_conv3_1*1' % name)
    c3_1 = Conv(c3, 256, kernel=(1, 3), pad=(0, 1), name='%s_conv4_3*1' % name)
    c3_2 = Conv(c3, 256, kernel=(3, 1), pad=(1, 0), name='%s_conv5_1*3' % name)

    c4 = Conv(input, 384, kernel=(1, 1), pad=(0, 0), name='%s_conv6_1*1' % name)
    c4 = Conv(c4, 448, kernel=(1, 3), pad=(0, 1), name='%s_conv7_1*3' % name)
    c4 = Conv(c4, 512, kernel=(3, 1), pad=(1, 0), name='%s_conv8_3*1' % name)
    c4_1 = Conv(c4, 256, kernel=(3, 1), pad=(1, 0), name='%s_conv9_1*3' % name)
    c4_2 = Conv(c4, 256, kernel=(1, 3), pad=(0, 1), name='%s_conv10_3*1' % name)

    concat = mx.sym.Concat(*[c1, c2, c3_1, c3_2, c4_1, c4_2], name='%s_concat' % name)
    return concat


def get_symbol(num_classes=1000, dtype='float32', **kwargs):
    """Build the full Inception-v4 classification symbol.

    num_classes: output dimension of the final fully-connected layer.
    dtype: 'float32' or 'float16'; float16 casts input down and the final
    FC output back up to float32 before the softmax.
    """
    data = mx.sym.Variable(name="data")
    if dtype == 'float32':
        data = mx.sym.identity(data=data, name='id')
    else:
        if dtype == 'float16':
            data = mx.sym.Cast(data=data, dtype=np.float16)
    x = Inception_stem(data, name='in_stem')

    # 4 * InceptionA
    for i in range(4):
        x = InceptionA(x, name='in%dA' % (i + 1))

    # Reduction A
    x = ReductionA(x, name='re1A')

    # 7 * InceptionB
    for i in range(7):
        x = InceptionB(x, name='in%dB' % (i + 1))

    # ReductionB
    x = ReductionB(x, name='re1B')

    # 3 * InceptionC
    for i in range(3):
        x = InceptionC(x, name='in%dC' % (i + 1))

    # Average Pooling
    x = mx.sym.Pooling(x, kernel=(8, 8), pad=(1, 1), pool_type='avg', name='global_avgpool')

    # Dropout
    x = mx.sym.Dropout(x, p=0.2)

    flatten = mx.sym.Flatten(x, name='flatten')
    fc1 = mx.sym.FullyConnected(flatten, num_hidden=num_classes, name='fc1')
    if dtype == 'float16':
        fc1 = mx.sym.Cast(data=fc1, dtype=np.float32)
    softmax = mx.sym.SoftmaxOutput(fc1, name='softmax')
    return softmax
apache-2.0
suneeth51/neutron
neutron/db/models_v2.py
4
11233
# Copyright (c) 2012 OpenStack Foundation.  All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy import orm

from neutron.api.v2 import attributes as attr
from neutron.common import constants
from neutron.db import model_base


class HasTenant(object):
    """Tenant mixin, add to subclasses that have a tenant."""

    # NOTE(jkoelker) tenant_id is just a free form string ;(
    tenant_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN), index=True)


class HasId(object):
    """id mixin, add to subclasses that have an id."""

    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=uuidutils.generate_uuid)


class HasStatusDescription(object):
    """Status with description mixin."""

    status = sa.Column(sa.String(16), nullable=False)
    status_description = sa.Column(sa.String(attr.DESCRIPTION_MAX_LEN))


class IPAvailabilityRange(model_base.BASEV2):
    """Internal representation of available IPs for Neutron subnets.

    Allocation - first entry from the range will be allocated. If the first
    entry is equal to the last entry then this row will be deleted.
    Recycling ips involves reading the IPAllocationPool and IPAllocation
    tables and inserting ranges representing available ips.  This happens
    after the final allocation is pulled from this table and a new ip
    allocation is requested.  Any contiguous ranges of available ips will be
    inserted as a single range.
    """

    allocation_pool_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('ipallocationpools.id',
                                                 ondelete="CASCADE"),
                                   nullable=False,
                                   primary_key=True)
    first_ip = sa.Column(sa.String(64), nullable=False, primary_key=True)
    last_ip = sa.Column(sa.String(64), nullable=False, primary_key=True)
    __table_args__ = (
        sa.UniqueConstraint(
            first_ip, allocation_pool_id,
            name='uniq_ipavailabilityranges0first_ip0allocation_pool_id'),
        sa.UniqueConstraint(
            last_ip, allocation_pool_id,
            name='uniq_ipavailabilityranges0last_ip0allocation_pool_id'),
        model_base.BASEV2.__table_args__
    )

    def __repr__(self):
        return "%s - %s" % (self.first_ip, self.last_ip)


class IPAllocationPool(model_base.BASEV2, HasId):
    """Representation of an allocation pool in a Neutron subnet."""

    subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id',
                                                       ondelete="CASCADE"),
                          nullable=True)
    first_ip = sa.Column(sa.String(64), nullable=False)
    last_ip = sa.Column(sa.String(64), nullable=False)
    available_ranges = orm.relationship(IPAvailabilityRange,
                                        backref='ipallocationpool',
                                        lazy="select",
                                        cascade='all, delete-orphan')

    def __repr__(self):
        return "%s - %s" % (self.first_ip, self.last_ip)


class IPAllocation(model_base.BASEV2):
    """Internal representation of allocated IP addresses in a Neutron subnet."""

    port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id',
                                                     ondelete="CASCADE"),
                        nullable=True)
    ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True)
    subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id',
                                                       ondelete="CASCADE"),
                          nullable=False, primary_key=True)
    network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id",
                                                        ondelete="CASCADE"),
                           nullable=False, primary_key=True)


class Route(object):
    """mixin of a route."""

    destination = sa.Column(sa.String(64), nullable=False, primary_key=True)
    nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True)


class SubnetRoute(model_base.BASEV2, Route):
    """Host route attached to a subnet."""

    subnet_id = sa.Column(sa.String(36),
                          sa.ForeignKey('subnets.id',
                                        ondelete="CASCADE"),
                          primary_key=True)


class Port(model_base.BASEV2, HasId, HasTenant):
    """Represents a port on a Neutron v2 network."""

    name = sa.Column(sa.String(attr.NAME_MAX_LEN))
    network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"),
                           nullable=False)
    fixed_ips = orm.relationship(IPAllocation, backref='port', lazy='joined',
                                 passive_deletes='all')
    mac_address = sa.Column(sa.String(32), nullable=False)
    admin_state_up = sa.Column(sa.Boolean(), nullable=False)
    status = sa.Column(sa.String(16), nullable=False)
    device_id = sa.Column(sa.String(attr.DEVICE_ID_MAX_LEN), nullable=False)
    device_owner = sa.Column(sa.String(attr.DEVICE_OWNER_MAX_LEN),
                             nullable=False)
    __table_args__ = (
        sa.Index(
            'ix_ports_network_id_mac_address', 'network_id', 'mac_address'),
        sa.Index(
            'ix_ports_network_id_device_owner', 'network_id', 'device_owner'),
        sa.UniqueConstraint(
            network_id, mac_address,
            name='uniq_ports0network_id0mac_address'),
        model_base.BASEV2.__table_args__
    )

    def __init__(self, id=None, tenant_id=None, name=None, network_id=None,
                 mac_address=None, admin_state_up=None, status=None,
                 device_id=None, device_owner=None, fixed_ips=None):
        self.id = id
        self.tenant_id = tenant_id
        self.name = name
        self.network_id = network_id
        self.mac_address = mac_address
        self.admin_state_up = admin_state_up
        self.device_owner = device_owner
        self.device_id = device_id
        # Since this is a relationship only set it if one is passed in.
        if fixed_ips:
            self.fixed_ips = fixed_ips
        # NOTE(arosen): status must be set last as an event is triggered on!
        self.status = status


class DNSNameServer(model_base.BASEV2):
    """Internal representation of a DNS nameserver."""

    address = sa.Column(sa.String(128), nullable=False, primary_key=True)
    subnet_id = sa.Column(sa.String(36),
                          sa.ForeignKey('subnets.id',
                                        ondelete="CASCADE"),
                          primary_key=True)
    order = sa.Column(sa.Integer, nullable=False, server_default='0')


class Subnet(model_base.BASEV2, HasId, HasTenant):
    """Represents a neutron subnet.

    When a subnet is created the first and last entries will be created.
    These are used for the IP allocation.
    """

    name = sa.Column(sa.String(attr.NAME_MAX_LEN))
    network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id'))
    subnetpool_id = sa.Column(sa.String(36), index=True)
    ip_version = sa.Column(sa.Integer, nullable=False)
    cidr = sa.Column(sa.String(64), nullable=False)
    gateway_ip = sa.Column(sa.String(64))
    allocation_pools = orm.relationship(IPAllocationPool,
                                        backref='subnet',
                                        lazy="joined",
                                        cascade='delete')
    enable_dhcp = sa.Column(sa.Boolean())
    dns_nameservers = orm.relationship(DNSNameServer,
                                       backref='subnet',
                                       cascade='all, delete, delete-orphan',
                                       order_by=DNSNameServer.order,
                                       lazy='joined')
    routes = orm.relationship(SubnetRoute,
                              backref='subnet',
                              cascade='all, delete, delete-orphan',
                              lazy='joined')
    ipv6_ra_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC,
                                     constants.DHCPV6_STATEFUL,
                                     constants.DHCPV6_STATELESS,
                                     name='ipv6_ra_modes'), nullable=True)
    ipv6_address_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC,
                                          constants.DHCPV6_STATEFUL,
                                          constants.DHCPV6_STATELESS,
                                          name='ipv6_address_modes'),
                                  nullable=True)
    # subnets don't have their own rbac_entries, they just inherit from
    # the network rbac entries through the association proxy
    rbac_entries = association_proxy('networks', 'rbac_entries')


class SubnetPoolPrefix(model_base.BASEV2):
    """Represents a neutron subnet pool prefix."""

    __tablename__ = 'subnetpoolprefixes'

    cidr = sa.Column(sa.String(64), nullable=False, primary_key=True)
    subnetpool_id = sa.Column(sa.String(36),
                              sa.ForeignKey('subnetpools.id'),
                              nullable=False,
                              primary_key=True)


class SubnetPool(model_base.BASEV2, HasId, HasTenant):
    """Represents a neutron subnet pool."""

    name = sa.Column(sa.String(255))
    ip_version = sa.Column(sa.Integer, nullable=False)
    default_prefixlen = sa.Column(sa.Integer, nullable=False)
    min_prefixlen = sa.Column(sa.Integer, nullable=False)
    max_prefixlen = sa.Column(sa.Integer, nullable=False)
    shared = sa.Column(sa.Boolean, nullable=False)
    default_quota = sa.Column(sa.Integer, nullable=True)
    hash = sa.Column(sa.String(36), nullable=False, server_default='')
    address_scope_id = sa.Column(sa.String(36), nullable=True)
    prefixes = orm.relationship(SubnetPoolPrefix,
                                backref='subnetpools',
                                cascade='all, delete, delete-orphan',
                                lazy='joined')


class Network(model_base.BASEV2, HasId, HasTenant):
    """Represents a v2 neutron network."""

    name = sa.Column(sa.String(attr.NAME_MAX_LEN))
    ports = orm.relationship(Port, backref='networks')
    subnets = orm.relationship(
        Subnet, backref=orm.backref('networks', lazy='joined'),
        lazy="joined")
    status = sa.Column(sa.String(16))
    admin_state_up = sa.Column(sa.Boolean)
    mtu = sa.Column(sa.Integer, nullable=True)
    vlan_transparent = sa.Column(sa.Boolean, nullable=True)
    rbac_entries = orm.relationship("NetworkRBAC", backref='network',
                                    lazy='joined',
                                    cascade='all, delete, delete-orphan')
apache-2.0
schumi2004/NOT_UPDATED_Sick-Beard-Dutch
lib/hachoir_core/i18n.py
90
6253
# -*- coding: UTF-8 -*-
"""
Functions to manage internationalisation (i18n):
- initLocale(): setup locales and install Unicode compatible stdout and
  stderr ;
- getTerminalCharset(): guess terminal charset ;
- gettext(text) translate a string to current language. The function always
  returns Unicode string. You can also use the alias: _() ;
- ngettext(singular, plural, count): translate a sentence with singular and
  plural form. The function always returns Unicode string.

WARNING: Loading this module indirectly calls initLocale() which sets
         locale LC_ALL to ''. This is needed to get user preferred locale
         settings.

NOTE(review): this module is Python 2 code (`unicode`, byte-string
handling); it must not be ported piecemeal to Python 3.
"""

import lib.hachoir_core.config as config
import lib.hachoir_core
import locale
from os import path
import sys
from codecs import BOM_UTF8, BOM_UTF16_LE, BOM_UTF16_BE


def _getTerminalCharset():
    """
    Function used by getTerminalCharset() to get terminal charset.

    @see getTerminalCharset()
    """
    # (1) Try locale.getpreferredencoding()
    try:
        charset = locale.getpreferredencoding()
        if charset:
            return charset
    except (locale.Error, AttributeError):
        pass

    # (2) Try locale.nl_langinfo(CODESET)
    try:
        charset = locale.nl_langinfo(locale.CODESET)
        if charset:
            return charset
    except (locale.Error, AttributeError):
        pass

    # (3) Try sys.stdout.encoding
    if hasattr(sys.stdout, "encoding") and sys.stdout.encoding:
        return sys.stdout.encoding

    # (4) Otherwise, returns "ASCII"
    return "ASCII"


def getTerminalCharset():
    """
    Guess terminal charset using differents tests:
    1. Try locale.getpreferredencoding()
    2. Try locale.nl_langinfo(CODESET)
    3. Try sys.stdout.encoding
    4. Otherwise, returns "ASCII"

    WARNING: Call initLocale() before calling this function.
    """
    # Result is memoized on the function object itself.
    try:
        return getTerminalCharset.value
    except AttributeError:
        getTerminalCharset.value = _getTerminalCharset()
        return getTerminalCharset.value


class UnicodeStdout(object):
    """Wrapper around a byte stream that encodes unicode text before writing,
    replacing unencodable characters."""

    def __init__(self, old_device, charset):
        self.device = old_device
        self.charset = charset

    def flush(self):
        self.device.flush()

    def write(self, text):
        # Encode unicode text to the terminal charset; bytes pass through.
        if isinstance(text, unicode):
            text = text.encode(self.charset, 'replace')
        self.device.write(text)

    def writelines(self, lines):
        for text in lines:
            self.write(text)


def initLocale():
    """Set up LC_ALL from the environment and (optionally) wrap stdout and
    stderr in UnicodeStdout.  Returns the terminal charset.  Idempotent."""
    # Only initialize locale once
    if initLocale.is_done:
        return getTerminalCharset()
    initLocale.is_done = True

    # Setup locales
    try:
        locale.setlocale(locale.LC_ALL, "")
    except (locale.Error, IOError):
        pass

    # Get the terminal charset
    charset = getTerminalCharset()

    # UnicodeStdout conflicts with the readline module
    if config.unicode_stdout and ('readline' not in sys.modules):
        # Replace stdout and stderr by unicode objet supporting unicode string
        sys.stdout = UnicodeStdout(sys.stdout, charset)
        sys.stderr = UnicodeStdout(sys.stderr, charset)
    return charset
initLocale.is_done = False


def _dummy_gettext(text):
    # Fallback used when gettext is disabled/unavailable: identity as unicode.
    return unicode(text)


def _dummy_ngettext(singular, plural, count):
    # Fallback plural selection: plural for counts other than 1 and -1,
    # and for count == 0.
    if 1 < abs(count) or not count:
        return unicode(plural)
    else:
        return unicode(singular)


def _initGettext():
    """Initialise gettext-based translation if enabled and available;
    otherwise return the dummy (identity) translation functions.
    Returns a (gettext, ngettext) pair of unicode-returning callables."""
    charset = initLocale()

    # Try to load gettext module
    if config.use_i18n:
        try:
            import gettext
            ok = True
        except ImportError:
            ok = False
    else:
        ok = False

    # gettext is not available or not needed: use dummy gettext functions
    if not ok:
        return (_dummy_gettext, _dummy_ngettext)

    # Gettext variables
    package = lib.hachoir_core.PACKAGE
    locale_dir = path.join(path.dirname(__file__), "..", "locale")

    # Initialize gettext module
    gettext.bindtextdomain(package, locale_dir)
    gettext.textdomain(package)
    translate = gettext.gettext
    ngettext = gettext.ngettext

    # TODO: translate_unicode lambda function really sucks!
    # => find native function to do that
    unicode_gettext = lambda text: \
        unicode(translate(text), charset)
    unicode_ngettext = lambda singular, plural, count: \
        unicode(ngettext(singular, plural, count), charset)
    return (unicode_gettext, unicode_ngettext)

# (BOM bytes, charset name) pairs checked first by guessBytesCharset().
UTF_BOMS = (
    (BOM_UTF8, "UTF-8"),
    (BOM_UTF16_LE, "UTF-16-LE"),
    (BOM_UTF16_BE, "UTF-16-BE"),
)

# Set of valid characters for specific charset
CHARSET_CHARACTERS = (
    # U+00E0: LATIN SMALL LETTER A WITH GRAVE
    (set(u"©®éêè\xE0ç".encode("ISO-8859-1")), "ISO-8859-1"),
    (set(u"©®éêè\xE0ç€".encode("ISO-8859-15")), "ISO-8859-15"),
    (set(u"©®".encode("MacRoman")), "MacRoman"),
    (set(u"εδηιθκμοΡσςυΈί".encode("ISO-8859-7")), "ISO-8859-7"),
)


def guessBytesCharset(bytes, default=None):
    r"""
    >>> guessBytesCharset("abc")
    'ASCII'
    >>> guessBytesCharset("\xEF\xBB\xBFabc")
    'UTF-8'
    >>> guessBytesCharset("abc\xC3\xA9")
    'UTF-8'
    >>> guessBytesCharset("File written by Adobe Photoshop\xA8 4.0\0")
    'MacRoman'
    >>> guessBytesCharset("\xE9l\xE9phant")
    'ISO-8859-1'
    >>> guessBytesCharset("100 \xA4")
    'ISO-8859-15'
    >>> guessBytesCharset('Word \xb8\xea\xe4\xef\xf3\xe7 - Microsoft Outlook 97 - \xd1\xf5\xe8\xec\xdf\xf3\xe5\xe9\xf2 e-mail')
    'ISO-8859-7'
    """
    # Check for UTF BOM
    for bom_bytes, charset in UTF_BOMS:
        if bytes.startswith(bom_bytes):
            return charset

    # Pure ASCII?
    try:
        text = unicode(bytes, 'ASCII', 'strict')
        return 'ASCII'
    except UnicodeDecodeError:
        pass

    # Valid UTF-8?
    try:
        text = unicode(bytes, 'UTF-8', 'strict')
        return 'UTF-8'
    except UnicodeDecodeError:
        pass

    # Create a set of non-ASCII characters
    non_ascii_set = set(byte for byte in bytes if ord(byte) >= 128)
    for characters, charset in CHARSET_CHARACTERS:
        if characters.issuperset(non_ascii_set):
            return charset
    return default

# Initialize _(), gettext() and ngettext() functions
gettext, ngettext = _initGettext()
_ = gettext
gpl-3.0
beeftornado/sentry
src/sentry/ingest/inbound_filters.py
1
7573
from __future__ import absolute_import

from rest_framework import serializers

from sentry import tsdb
from sentry.relay.utils import to_camel_case_name
from sentry.api.fields.multiplechoice import MultipleChoiceField
from sentry.models.projectoption import ProjectOption
from sentry.signals import inbound_filter_toggled


class FilterStatKeys(object):
    """
    NOTE: This enum also exists in Relay, check if alignment is needed when
    editing this.
    """

    IP_ADDRESS = "ip-address"
    RELEASE_VERSION = "release-version"
    ERROR_MESSAGE = "error-message"
    BROWSER_EXTENSION = "browser-extensions"
    LEGACY_BROWSER = "legacy-browsers"
    LOCALHOST = "localhost"
    WEB_CRAWLER = "web-crawlers"
    INVALID_CSP = "invalid-csp"
    CORS = "cors"
    DISCARDED_HASH = "discarded-hash"  # Not replicated in Relay
    CRASH_REPORT_LIMIT = "crash-report-limit"  # Not replicated in Relay


# Maps each filter stat key to the tsdb model that counts events it filtered.
FILTER_STAT_KEYS_TO_VALUES = {
    FilterStatKeys.IP_ADDRESS: tsdb.models.project_total_received_ip_address,
    FilterStatKeys.RELEASE_VERSION: tsdb.models.project_total_received_release_version,
    FilterStatKeys.ERROR_MESSAGE: tsdb.models.project_total_received_error_message,
    FilterStatKeys.BROWSER_EXTENSION: tsdb.models.project_total_received_browser_extensions,
    FilterStatKeys.LEGACY_BROWSER: tsdb.models.project_total_received_legacy_browsers,
    FilterStatKeys.LOCALHOST: tsdb.models.project_total_received_localhost,
    FilterStatKeys.WEB_CRAWLER: tsdb.models.project_total_received_web_crawlers,
    FilterStatKeys.INVALID_CSP: tsdb.models.project_total_received_invalid_csp,
    FilterStatKeys.CORS: tsdb.models.project_total_received_cors,
    FilterStatKeys.DISCARDED_HASH: tsdb.models.project_total_received_discarded,
}


class FilterTypes(object):
    """Filter option namespaces stored as project options."""

    ERROR_MESSAGES = "error_messages"
    RELEASES = "releases"


def get_filter_key(flt):
    """Return the camelCase key used for *flt* in the Relay filterSettings."""
    return to_camel_case_name(flt.id.replace("-", "_"))


def get_all_filter_specs():
    """
    Return metadata about the filters known by Sentry.

    An event filter is a function that receives a project_config and an event
    data payload and returns a tuple
    (should_filter:bool, filter_reason: string | None) representing

    :return: list of registered event filters
    """
    return (
        _localhost_filter,
        _browser_extensions_filter,
        _legacy_browsers_filter,
        _web_crawlers_filter,
    )


def set_filter_state(filter_id, project, state):
    """Persist the enabled/disabled (or subfilter) state of a filter as a
    project option.  Returns the effective state that was stored."""
    flt = _filter_from_filter_id(filter_id)
    if flt is None:
        raise FilterNotRegistered(filter_id)

    if flt == _legacy_browsers_filter:
        if state is None:
            state = {}

        option_val = "0"
        if "active" in state:
            if state["active"]:
                option_val = "1"
        elif "subfilters" in state and len(state["subfilters"]) > 0:
            # A set of browser subfilter names rather than a plain on/off.
            option_val = set(state["subfilters"])

        ProjectOption.objects.set_value(
            project=project, key=u"filters:{}".format(filter_id), value=option_val
        )

        return option_val == "1" if option_val in ("0", "1") else option_val

    else:
        # all boolean filters
        if state is None:
            state = {"active": True}

        ProjectOption.objects.set_value(
            project=project,
            key=u"filters:{}".format(filter_id),
            value="1" if state.get("active", False) else "0",
        )

        if state:
            inbound_filter_toggled.send(project=project, sender=flt)

        return state.get("active", False)


def get_filter_state(filter_id, project):
    """
    Returns the filter state

    IMPORTANT: this function accesses the database, it should NEVER be used
    by the ingestion pipe.  This api is used by the ProjectFilterDetails and
    ProjectFilters endpoints.

    :param filter_id: the filter Id
    :param project: the project for which we want the filter state
    :return: True if the filter is enabled False otherwise
    :raises: ValueError if filter id not registered
    """
    flt = _filter_from_filter_id(filter_id)
    if flt is None:
        raise FilterNotRegistered(filter_id)

    filter_state = ProjectOption.objects.get_value(
        project=project, key=u"filters:{}".format(flt.id)
    )

    if filter_state is None:
        raise ValueError(
            "Could not find filter state for filter {0}."
            " You need to register default filter state in projectoptions.defaults.".format(
                filter_id
            )
        )

    if flt == _legacy_browsers_filter:
        # special handling for legacy browser state
        if filter_state == "1":
            return True
        if filter_state == "0":
            return False
        return filter_state
    else:
        return filter_state == "1"


class FilterNotRegistered(Exception):
    """Raised when a filter id does not match any registered filter spec."""

    pass


def _filter_from_filter_id(filter_id):
    """
    Returns the corresponding filter for a filter id or None if no filter
    with the given id found.
    """
    for flt in get_all_filter_specs():
        if flt.id == filter_id:
            return flt
    return None


class _FilterSerializer(serializers.Serializer):
    """Serializer for simple on/off filters."""

    active = serializers.BooleanField()


class _FilterSpec(object):
    """
    Data associated with a filter: defines its name, id, default enable state
    and how its state is serialized in the database.
    """

    def __init__(self, id, name, description, serializer_cls=None):
        self.id = id
        self.name = name
        self.description = description
        if serializer_cls is None:
            self.serializer_cls = _FilterSerializer
        else:
            self.serializer_cls = serializer_cls


def _get_filter_settings(project_config, flt):
    """
    Gets the filter options from the relay config or the default option if
    not specified in the relay config.

    :param project_config: the relay config for the request
    :param flt: the filter
    :return: the options for the filter
    """
    filter_settings = project_config.config.get("filterSettings", {})
    return filter_settings.get(get_filter_key(flt), None)


_localhost_filter = _FilterSpec(
    id=FilterStatKeys.LOCALHOST,
    name="Filter out events coming from localhost",
    description="This applies to both IPv4 (``127.0.0.1``) and IPv6 (``::1``) addresses.",
)

_browser_extensions_filter = _FilterSpec(
    id=FilterStatKeys.BROWSER_EXTENSION,
    name="Filter out errors known to be caused by browser extensions",
    description="Certain browser extensions will inject inline scripts and are known to cause errors.",
)


class _LegacyBrowserFilterSerializer(serializers.Serializer):
    """Serializer for the legacy-browsers filter, which can be toggled as a
    whole or per browser generation via subfilters."""

    active = serializers.BooleanField()
    subfilters = MultipleChoiceField(
        choices=[
            "ie_pre_9",
            "ie9",
            "ie10",
            "ie11",
            "opera_pre_15",
            "android_pre_4",
            "safari_pre_6",
            "opera_mini_pre_8",
        ]
    )


_legacy_browsers_filter = _FilterSpec(
    id=FilterStatKeys.LEGACY_BROWSER,
    name="Filter out known errors from legacy browsers",
    description="Older browsers often give less accurate information, and while they may report valid issues, "
    "the context to understand them is incorrect or missing.",
    serializer_cls=_LegacyBrowserFilterSerializer,
)

_web_crawlers_filter = _FilterSpec(
    id=FilterStatKeys.WEB_CRAWLER,
    name="Filter out known web crawlers",
    description="Some crawlers may execute pages in incompatible ways which then cause errors that"
    " are unlikely to be seen by a normal user.",
)
bsd-3-clause
kubeless/kubeless
docker/event-sources/kubernetes/events.py
6
3189
# -*- coding: utf-8 -*-
"""Watch several Kubernetes resource kinds and forward every watch event
to the ``k8s`` Kafka topic as JSON.

One coroutine is started per resource kind; each streams watch events
forever and publishes them through a single shared Kafka producer.
"""

import asyncio
import logging
import json

from kubernetes import client, config, watch
from kafka import KafkaProducer
from kafka.errors import KafkaError

logger = logging.getLogger('k8s_events')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)

# This process runs inside the cluster, so authenticate with the pod's
# service-account credentials rather than a local kubeconfig.
#config.load_kube_config()
config.load_incluster_config()

v1 = client.CoreV1Api()
v1ext = client.ExtensionsV1beta1Api()

# One shared producer for the whole process.  (Previously ``services``
# constructed a fresh KafkaProducer for every single event, opening a new
# broker connection per event and leaking them all.)
producer = KafkaProducer(
    bootstrap_servers='kafka.kubeless:9092',
    value_serializer=lambda v: json.dumps(v).encode('utf-8'))


def _publish(event):
    """Log a single watch event and publish it to the ``k8s`` topic."""
    logger.info("Event: %s %s %s" % (event['type'], event['object'].kind,
                                     event['object'].metadata.name))
    msg = {'type': event['type'], 'object': event['raw_object']}
    producer.send('k8s', msg)
    producer.flush()


@asyncio.coroutine
def _watch_stream(list_func):
    """Stream watch events for ``list_func`` forever, publishing each one.

    Sleeps briefly after every event so the other watcher coroutines get
    a chance to run on the shared event loop.
    """
    w = watch.Watch()
    for event in w.stream(list_func):
        _publish(event)
        yield from asyncio.sleep(0.1)


@asyncio.coroutine
def pods():
    yield from _watch_stream(v1.list_pod_for_all_namespaces)


@asyncio.coroutine
def namespaces():
    yield from _watch_stream(v1.list_namespace)


@asyncio.coroutine
def services():
    yield from _watch_stream(v1.list_service_for_all_namespaces)


@asyncio.coroutine
def deployments():
    yield from _watch_stream(v1ext.list_deployment_for_all_namespaces)


@asyncio.coroutine
def replicasets():
    yield from _watch_stream(v1ext.list_replica_set_for_all_namespaces)


ioloop = asyncio.get_event_loop()

ioloop.create_task(pods())
ioloop.create_task(namespaces())
ioloop.create_task(services())
ioloop.create_task(deployments())
ioloop.create_task(replicasets())

try:
    # Blocking call interrupted by loop.stop()
    print('step: loop.run_forever()')
    ioloop.run_forever()
except KeyboardInterrupt:
    pass
finally:
    print('step: loop.close()')
    ioloop.close()
apache-2.0
mabotech/mabo.ai
py/webservices/orgService.py
3
5747
# -*- coding: utf-8 -*- """ IAMService """ import time import xml.sax.saxutils as saxutils # post xml soap message import sys, httplib from lxml import etree from cStringIO import StringIO #import static import toml class IAMClient(object): def __init__(self): conf_fn = "config.toml" with open(conf_fn) as conf_fh: self.conf = toml.loads(conf_fh.read()) print(self.conf) def searchAll(self, startPage, pageSize ): #config = static.ERP_CONFIG #'SL 8.0' query = {"username":self.conf["Admin"],"password":self.conf["Admin_Password"], "nonce":self.conf["Nonce"], "startPage":startPage, "pageSize": pageSize} SM_TEMPLATE = r"""<?xml version="1.0" encoding="UTF-8"?> <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:sear="http://search.service.iam.foton.com/"> <soapenv:Header> <wsse:Security soapenv:mustUnderstand="1" xmlns:wsse="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd"> <wsse:UsernameToken wsu:Id="UsernameToken-1" xmlns:wsu="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd"> <wsse:Username>%(username)s</wsse:Username> <wsse:Password Type="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0#PasswordText">%(password)s</wsse:Password> <wsse:Nonce EncodingType="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary">%(nonce)s</wsse:Nonce> <wsu:Created>2012-07-06T01:49:02.953Z</wsu:Created> </wsse:UsernameToken> </wsse:Security> </soapenv:Header> <soapenv:Body> <sear:searchAll> <arg0>%(startPage)s</arg0> <arg1>%(pageSize)s</arg1> <!--Optional:--> <arg2>ou</arg2> <arg3>true</arg3> </sear:searchAll> </soapenv:Body> </soapenv:Envelope>""" % query SoapMessage = SM_TEMPLATE #print SoapMessage #construct and send the header host =self.conf["HOST"] print(host) webservice = httplib.HTTP(host) service = self.conf["Service2"] url = "/IAMService/services/soap/%s" %(service) webservice.putrequest("POST", url) 
webservice.putheader("Host", host) webservice.putheader("User-Agent", "Mozilla/4.0+(compatible;+MSIE+6.0;+Windows+NT+5.2;+SV1;+.NET+CLR+1.1.4322)") webservice.putheader("Content-type", "text/xml; charset=\"UTF-8\"") webservice.putheader("Accept-Language", "en-us") webservice.putheader("Content-length", "%d" % len(SoapMessage)) #webservice.putheader("SOAPAction", "authenticate") webservice.endheaders() webservice.send(SoapMessage) # get the response statuscode, statusmessage, header = webservice.getreply() print "Response: ", statuscode, statusmessage, startPage #print "headers: ", header #print dir(webservice) res = webservice.getfile().read() fn = "%d.xml" %(time.time()) #print res #with open(fn, 'w') as fh: # fh.write(res) return res #self.parseSessionToken(res) def getResponse(self, xmlstr): string_file = StringIO(xmlstr.replace('soap:','')) #root = etree.fromstring(xml) tree = etree.parse(string_file) resp = None for element in tree.xpath('/Envelope/Body'): resp = element[0][1].text return resp def getResult(self, xmlstr): resp = self.getResponse(xmlstr) string_file = StringIO(resp) #root = etree.fromstring(xml) tree = etree.parse(string_file) result = None v = tree.xpath('/Parameters')[0] l = len(v) result = v[l-1].text if result.count('successful') >0: return "S" else: return "F" def get_element_text(element, node): v = element.xpath(node) if len(v)>0: #print v[0].text.encode("utf8") return v[0].text.encode("utf8") else: return "" def main(): cm = IAMClient() fh = open("id3.csv","w") for i in range(1, 20): xmlstr = cm.searchAll(i,10) string_file = StringIO(xmlstr.replace('soap:','').replace("ns2:","")) #root = etree.fromstring(xml) tree = etree.parse(string_file) resp = None for element in tree.xpath('/Envelope/Body/searchAllResponse/return/userData'): #resp = element[0][1].text #print "\n" v1 = get_element_text(element, "cn") v2 = get_element_text(element, "mail") v3 = get_element_text(element, "fotonAppAtt37") v4 = get_element_text(element, "mobile") v5 = 
get_element_text(element, "telephoneNumber") v6 = get_element_text(element, "uid") v7 = get_element_text(element, "ou") #print userPassword[0].text, x = "%s,%s,%s,%s,%s,%s,%s\n" % (v1, v2, v3, v4, v5, v6, v7) fh.write(x) time.sleep(0.5) fh.close() """ token = cm.parseSessionToken(xmlstr) rtn = cm.callMethod(token, "") print cm.getResult(rtn) """ if __name__ == '__main__': main()
mit
Intel-Corporation/tensorflow
tensorflow/python/distribute/device_util_test.py
22
3457
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for device utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import device_util from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.platform import test class DeviceUtilTest(test.TestCase): @test_util.run_deprecated_v1 def testCurrentDeviceWithGlobalGraph(self): with ops.device("/cpu:0"): self.assertEqual(device_util.current(), "/device:CPU:0") with ops.device("/job:worker"): with ops.device("/cpu:0"): self.assertEqual(device_util.current(), "/job:worker/device:CPU:0") with ops.device("/cpu:0"): with ops.device("/gpu:0"): self.assertEqual(device_util.current(), "/device:GPU:0") def testCurrentDeviceWithNonGlobalGraph(self): with ops.Graph().as_default(): with ops.device("/cpu:0"): self.assertEqual(device_util.current(), "/device:CPU:0") def testCurrentDeviceWithEager(self): with context.eager_mode(): with ops.device("/cpu:0"): self.assertEqual(device_util.current(), "/job:localhost/replica:0/task:0/device:CPU:0") @test_util.run_deprecated_v1 def testCanonicalizeWithoutDefaultDevice(self): self.assertEqual( device_util.canonicalize("/cpu:0"), 
"/replica:0/task:0/device:CPU:0") self.assertEqual( device_util.canonicalize("/job:worker/cpu:0"), "/job:worker/replica:0/task:0/device:CPU:0") self.assertEqual( device_util.canonicalize("/job:worker/task:1/cpu:0"), "/job:worker/replica:0/task:1/device:CPU:0") def testCanonicalizeWithDefaultDevice(self): self.assertEqual( device_util.canonicalize("/job:worker/task:1/cpu:0", default="/gpu:0"), "/job:worker/replica:0/task:1/device:CPU:0") self.assertEqual( device_util.canonicalize("/job:worker/task:1", default="/gpu:0"), "/job:worker/replica:0/task:1/device:GPU:0") self.assertEqual( device_util.canonicalize("/cpu:0", default="/job:worker"), "/job:worker/replica:0/task:0/device:CPU:0") def testResolveWithDeviceScope(self): with ops.device("/gpu:0"): self.assertEqual( device_util.resolve("/job:worker/task:1/cpu:0"), "/job:worker/replica:0/task:1/device:CPU:0") self.assertEqual( device_util.resolve("/job:worker/task:1"), "/job:worker/replica:0/task:1/device:GPU:0") with ops.device("/job:worker"): self.assertEqual( device_util.resolve("/cpu:0"), "/job:worker/replica:0/task:0/device:CPU:0") if __name__ == "__main__": test.main()
apache-2.0
mbr/flask
flask/signals.py
191
2209
# -*- coding: utf-8 -*-
"""
    flask.signals
    ~~~~~~~~~~~~~

    Implements signals based on blinker if available, otherwise
    falls silently back to a noop.

    :copyright: (c) 2015 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""


class _FakeSignal(object):
    """If blinker is unavailable, create a fake class with the same
    interface that allows sending of signals but will fail with an
    error on anything else.  Instead of doing anything on send, it
    will just ignore the arguments and do nothing instead.
    """

    def __init__(self, name, doc=None):
        self.name = name
        self.__doc__ = doc

    def _fail(self, *args, **kwargs):
        raise RuntimeError('signalling support is unavailable '
                           'because the blinker library is '
                           'not installed.')

    #: Sending is a silent no-op, so callers never need to guard on
    #: blinker being installed.
    send = lambda *a, **kw: None

    #: Anything that would require real subscribers raises instead.
    connect = disconnect = has_receivers_for = receivers_for = \
        temporarily_connected_to = connected_to = _fail
    del _fail


try:
    from blinker import Namespace
    signals_available = True
except ImportError:
    signals_available = False

    class Namespace(object):
        """Minimal stand-in for ``blinker.Namespace`` that hands out
        inert :class:`_FakeSignal` objects.
        """

        def signal(self, name, doc=None):
            return _FakeSignal(name, doc)


#: The namespace for code signals.  If you are not flask code, do
#: not put signals in here.  Create your own namespace instead.
_signals = Namespace()


#: Core signals.  For usage examples grep the source code or consult
#: the API documentation in docs/api.rst as well as docs/signals.rst
template_rendered = _signals.signal('template-rendered')
before_render_template = _signals.signal('before-render-template')
request_started = _signals.signal('request-started')
request_finished = _signals.signal('request-finished')
request_tearing_down = _signals.signal('request-tearing-down')
got_request_exception = _signals.signal('got-request-exception')
appcontext_tearing_down = _signals.signal('appcontext-tearing-down')
appcontext_pushed = _signals.signal('appcontext-pushed')
appcontext_popped = _signals.signal('appcontext-popped')
message_flashed = _signals.signal('message-flashed')
bsd-3-clause
jalexvig/tensorflow
tensorflow/python/estimator/inputs/inputs.py
20
1086
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility methods to create simple input_fns.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import,line-too-long from tensorflow.python.estimator.inputs.numpy_io import numpy_input_fn from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn # pylint: enable=unused-import,line-too-long
apache-2.0
massot/odoo
addons/edi/__init__.py
437
1157
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2011 OpenERP S.A. <http://openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from . import controllers from . import models from . import edi_service from .models.edi import EDIMixin, edi # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
mkrupcale/ansible
lib/ansible/modules/packaging/os/pkg5_publisher.py
23
5898
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}

DOCUMENTATION = '''
---
module: pkg5_publisher
author: "Peter Oliver (@mavit)"
short_description: Manages Solaris 11 Image Packaging System publishers
version_added: 1.9
description:
  - IPS packages are the native packages in Solaris 11 and higher.
  - This modules will configure which publishers a client will download IPS
    packages from.
options:
  name:
    description:
      - The publisher's name.
    required: true
    aliases: [ publisher ]
  state:
    description:
      - Whether to ensure that a publisher is present or absent.
    required: false
    default: present
    choices: [ present, absent ]
  sticky:
    description:
      - Packages installed from a sticky repository can only receive updates
        from that repository.
    required: false
    default: null
    choices: [ true, false ]
  enabled:
    description:
      - Is the repository enabled or disabled?
    required: false
    default: null
    choices: [ true, false ]
  origin:
    description:
      - A path or URL to the repository.
      - Multiple values may be provided.
    required: false
    default: null
  mirror:
    description:
      - A path or URL to the repository mirror.
      - Multiple values may be provided.
    required: false
    default: null
'''

EXAMPLES = '''
# Fetch packages for the solaris publisher direct from Oracle:
- pkg5_publisher:
    name: solaris
    sticky: true
    origin: https://pkg.oracle.com/solaris/support/

# Configure a publisher for locally-produced packages:
- pkg5_publisher:
    name: site
    origin: 'https://pkg.example.com/site/'
'''


def main():
    """Entry point: parse module arguments and apply the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, aliases=['publisher']),
            state=dict(default='present', choices=['present', 'absent']),
            sticky=dict(type='bool'),
            enabled=dict(type='bool'),
            # search_after=dict(),
            # search_before=dict(),
            origin=dict(type='list'),
            mirror=dict(type='list'),
        )
    )

    # An empty string for a list option arrives as [''] — normalise it to
    # "no values supplied".
    for option in ['origin', 'mirror']:
        if module.params[option] == ['']:
            module.params[option] = []

    if module.params['state'] == 'present':
        modify_publisher(module, module.params)
    else:
        unset_publisher(module, module.params['name'])


def modify_publisher(module, params):
    """Create the publisher, or update it only when a requested option
    differs from the current configuration (keeps the module idempotent)."""
    name = params['name']
    existing = get_publishers(module)

    if name in existing:
        for option in ['origin', 'mirror', 'sticky', 'enabled']:
            # Options left unset (None) mean "leave as-is".
            if params[option] is not None:
                if params[option] != existing[name][option]:
                    return set_publisher(module, params)
    else:
        return set_publisher(module, params)

    # Publisher already matches the requested configuration: no change.
    module.exit_json()


def set_publisher(module, params):
    """Run ``pkg set-publisher`` with flags derived from *params* and exit."""
    name = params['name']
    args = []

    # Replace (rather than append to) the origin/mirror lists so that the
    # publisher ends up with exactly the URIs the task specifies.
    if params['origin'] is not None:
        args.append('--remove-origin=*')
        args.extend(['--add-origin=' + u for u in params['origin']])
    if params['mirror'] is not None:
        args.append('--remove-mirror=*')
        args.extend(['--add-mirror=' + u for u in params['mirror']])

    if params['sticky'] is not None and params['sticky']:
        args.append('--sticky')
    elif params['sticky'] is not None:
        args.append('--non-sticky')

    if params['enabled'] is not None and params['enabled']:
        args.append('--enable')
    elif params['enabled'] is not None:
        args.append('--disable')

    rc, out, err = module.run_command(
        ["pkg", "set-publisher"] + args + [name],
        check_rc=True
    )
    response = {
        'rc': rc,
        'results': [out],
        'msg': err,
        'changed': True,
    }
    module.exit_json(**response)


def unset_publisher(module, publisher):
    """Remove *publisher* with ``pkg unset-publisher`` if it exists, then exit."""
    if publisher not in get_publishers(module):
        # Already absent: nothing to do.
        module.exit_json()

    rc, out, err = module.run_command(
        ["pkg", "unset-publisher", publisher],
        check_rc=True
    )
    response = {
        'rc': rc,
        'results': [out],
        'msg': err,
        'changed': True,
    }
    module.exit_json(**response)


def get_publishers(module):
    """Parse ``pkg publisher -Ftsv`` output into a dict keyed by publisher
    name, each value holding 'sticky', 'enabled', 'origin' and 'mirror'."""
    rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True)

    lines = out.splitlines()
    # First line is the TSV header; use it as the key names.
    keys = lines.pop(0).lower().split("\t")

    publishers = {}
    for line in lines:
        values = dict(zip(keys, map(unstringify, line.split("\t"))))
        name = values['publisher']

        if name not in publishers:
            publishers[name] = dict(
                (k, values[k]) for k in ['sticky', 'enabled']
            )
            publishers[name]['origin'] = []
            publishers[name]['mirror'] = []

        # 'type' is 'origin' or 'mirror' (None for publishers with no URIs).
        if values['type'] is not None:
            publishers[name][values['type']].append(values['uri'])

    return publishers


def unstringify(val):
    """Map pkg(5)'s textual field values back to Python types:
    '-'/'' -> None, 'true'/'false' -> bool, anything else unchanged."""
    if val == "-" or val == '':
        return None
    elif val == "true":
        return True
    elif val == "false":
        return False
    else:
        return val


from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
gpl-3.0
kracekumar/python-driver
tests/unit/test_connection.py
4
17019
# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. try: import unittest2 as unittest except ImportError: import unittest # noqa from mock import Mock, ANY, call, patch import six from six import BytesIO import time from threading import Lock from cassandra.cluster import Cluster from cassandra.connection import (Connection, HEADER_DIRECTION_TO_CLIENT, HEADER_DIRECTION_FROM_CLIENT, ProtocolError, locally_supported_compressions, ConnectionHeartbeat) from cassandra.marshal import uint8_pack, uint32_pack from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage) class ConnectionTest(unittest.TestCase): protocol_version = 2 def make_connection(self): c = Connection('1.2.3.4') c._socket = Mock() c._socket.send.side_effect = lambda x: len(x) return c def make_header_prefix(self, message_class, version=2, stream_id=0): return six.binary_type().join(map(uint8_pack, [ 0xff & (HEADER_DIRECTION_TO_CLIENT | version), 0, # flags (compression) stream_id, message_class.opcode # opcode ])) def make_options_body(self): options_buf = BytesIO() write_stringmultimap(options_buf, { 'CQL_VERSION': ['3.0.1'], 'COMPRESSION': [] }) return options_buf.getvalue() def make_error_body(self, code, msg): buf = BytesIO() write_int(buf, code) write_string(buf, msg) return buf.getvalue() def make_msg(self, header, body=""): return header + uint32_pack(len(body)) + body def test_bad_protocol_version(self, *args): c = self.make_connection() 
c._callbacks = Mock() c.defunct = Mock() # read in a SupportedMessage response header = self.make_header_prefix(SupportedMessage, version=0x04) options = self.make_options_body() message = self.make_msg(header, options) c.process_msg(message, len(message) - 8) # make sure it errored correctly c.defunct.assert_called_once_with(ANY) args, kwargs = c.defunct.call_args self.assertIsInstance(args[0], ProtocolError) def test_bad_header_direction(self, *args): c = self.make_connection() c._callbacks = Mock() c.defunct = Mock() # read in a SupportedMessage response header = six.binary_type().join(uint8_pack(i) for i in ( 0xff & (HEADER_DIRECTION_FROM_CLIENT | self.protocol_version), 0, # flags (compression) 0, SupportedMessage.opcode # opcode )) options = self.make_options_body() message = self.make_msg(header, options) c.process_msg(message, len(message) - 8) # make sure it errored correctly c.defunct.assert_called_once_with(ANY) args, kwargs = c.defunct.call_args self.assertIsInstance(args[0], ProtocolError) def test_negative_body_length(self, *args): c = self.make_connection() c._callbacks = Mock() c.defunct = Mock() # read in a SupportedMessage response header = self.make_header_prefix(SupportedMessage) options = self.make_options_body() message = self.make_msg(header, options) c.process_msg(message, -13) # make sure it errored correctly c.defunct.assert_called_once_with(ANY) args, kwargs = c.defunct.call_args self.assertIsInstance(args[0], ProtocolError) def test_unsupported_cql_version(self, *args): c = self.make_connection() c._callbacks = {0: c._handle_options_response} c.defunct = Mock() c.cql_version = "3.0.3" # read in a SupportedMessage response header = self.make_header_prefix(SupportedMessage) options_buf = BytesIO() write_stringmultimap(options_buf, { 'CQL_VERSION': ['7.8.9'], 'COMPRESSION': [] }) options = options_buf.getvalue() message = self.make_msg(header, options) c.process_msg(message, len(message) - 8) # make sure it errored correctly 
c.defunct.assert_called_once_with(ANY) args, kwargs = c.defunct.call_args self.assertIsInstance(args[0], ProtocolError) def test_prefer_lz4_compression(self, *args): c = self.make_connection() c._callbacks = {0: c._handle_options_response} c.defunct = Mock() c.cql_version = "3.0.3" locally_supported_compressions.pop('lz4', None) locally_supported_compressions.pop('snappy', None) locally_supported_compressions['lz4'] = ('lz4compress', 'lz4decompress') locally_supported_compressions['snappy'] = ('snappycompress', 'snappydecompress') # read in a SupportedMessage response header = self.make_header_prefix(SupportedMessage) options_buf = BytesIO() write_stringmultimap(options_buf, { 'CQL_VERSION': ['3.0.3'], 'COMPRESSION': ['snappy', 'lz4'] }) options = options_buf.getvalue() message = self.make_msg(header, options) c.process_msg(message, len(message) - 8) self.assertEqual(c.decompressor, locally_supported_compressions['lz4'][1]) def test_requested_compression_not_available(self, *args): c = self.make_connection() c._callbacks = {0: c._handle_options_response} c.defunct = Mock() # request lz4 compression c.compression = "lz4" locally_supported_compressions.pop('lz4', None) locally_supported_compressions.pop('snappy', None) locally_supported_compressions['lz4'] = ('lz4compress', 'lz4decompress') locally_supported_compressions['snappy'] = ('snappycompress', 'snappydecompress') # read in a SupportedMessage response header = self.make_header_prefix(SupportedMessage) # the server only supports snappy options_buf = BytesIO() write_stringmultimap(options_buf, { 'CQL_VERSION': ['3.0.3'], 'COMPRESSION': ['snappy'] }) options = options_buf.getvalue() message = self.make_msg(header, options) c.process_msg(message, len(message) - 8) # make sure it errored correctly c.defunct.assert_called_once_with(ANY) args, kwargs = c.defunct.call_args self.assertIsInstance(args[0], ProtocolError) def test_use_requested_compression(self, *args): c = self.make_connection() c._callbacks = {0: 
c._handle_options_response} c.defunct = Mock() # request snappy compression c.compression = "snappy" locally_supported_compressions.pop('lz4', None) locally_supported_compressions.pop('snappy', None) locally_supported_compressions['lz4'] = ('lz4compress', 'lz4decompress') locally_supported_compressions['snappy'] = ('snappycompress', 'snappydecompress') # read in a SupportedMessage response header = self.make_header_prefix(SupportedMessage) # the server only supports snappy options_buf = BytesIO() write_stringmultimap(options_buf, { 'CQL_VERSION': ['3.0.3'], 'COMPRESSION': ['snappy', 'lz4'] }) options = options_buf.getvalue() message = self.make_msg(header, options) c.process_msg(message, len(message) - 8) self.assertEqual(c.decompressor, locally_supported_compressions['snappy'][1]) def test_disable_compression(self, *args): c = self.make_connection() c._callbacks = {0: c._handle_options_response} c.defunct = Mock() # disable compression c.compression = False locally_supported_compressions.pop('lz4', None) locally_supported_compressions.pop('snappy', None) locally_supported_compressions['lz4'] = ('lz4compress', 'lz4decompress') locally_supported_compressions['snappy'] = ('snappycompress', 'snappydecompress') # read in a SupportedMessage response header = self.make_header_prefix(SupportedMessage) # the server only supports snappy options_buf = BytesIO() write_stringmultimap(options_buf, { 'CQL_VERSION': ['3.0.3'], 'COMPRESSION': ['snappy', 'lz4'] }) options = options_buf.getvalue() message = self.make_msg(header, options) c.process_msg(message, len(message) - 8) self.assertEqual(c.decompressor, None) def test_not_implemented(self): """ Ensure the following methods throw NIE's. If not, come back and test them. 
""" c = self.make_connection() self.assertRaises(NotImplementedError, c.close) self.assertRaises(NotImplementedError, c.register_watcher, None, None) self.assertRaises(NotImplementedError, c.register_watchers, None) def test_set_keyspace_blocking(self): c = self.make_connection() self.assertEqual(c.keyspace, None) c.set_keyspace_blocking(None) self.assertEqual(c.keyspace, None) c.keyspace = 'ks' c.set_keyspace_blocking('ks') self.assertEqual(c.keyspace, 'ks') def test_set_connection_class(self): cluster = Cluster(connection_class='test') self.assertEqual('test', cluster.connection_class) @patch('cassandra.connection.ConnectionHeartbeat._raise_if_stopped') class ConnectionHeartbeatTest(unittest.TestCase): @staticmethod def make_get_holders(len): holders = [] for _ in range(len): holder = Mock() holder.get_connections = Mock(return_value=[]) holders.append(holder) get_holders = Mock(return_value=holders) return get_holders def run_heartbeat(self, get_holders_fun, count=2, interval=0.05): ch = ConnectionHeartbeat(interval, get_holders_fun) time.sleep(interval * count) ch.stop() self.assertTrue(get_holders_fun.call_count) def test_empty_connections(self, *args): count = 3 get_holders = self.make_get_holders(1) self.run_heartbeat(get_holders, count) self.assertGreaterEqual(get_holders.call_count, count - 1) # lower bound to account for thread spinup time self.assertLessEqual(get_holders.call_count, count) holder = get_holders.return_value[0] holder.get_connections.assert_has_calls([call()] * get_holders.call_count) def test_idle_non_idle(self, *args): request_id = 999 # connection.send_msg(OptionsMessage(), connection.get_request_id(), self._options_callback) def send_msg(msg, req_id, msg_callback): msg_callback(SupportedMessage([], {})) idle_connection = Mock(spec=Connection, host='localhost', max_request_id=127, lock=Lock(), in_flight=0, is_idle=True, is_defunct=False, is_closed=False, get_request_id=lambda: request_id, send_msg=Mock(side_effect=send_msg)) 
non_idle_connection = Mock(spec=Connection, in_flight=0, is_idle=False, is_defunct=False, is_closed=False) get_holders = self.make_get_holders(1) holder = get_holders.return_value[0] holder.get_connections.return_value.append(idle_connection) holder.get_connections.return_value.append(non_idle_connection) self.run_heartbeat(get_holders) holder.get_connections.assert_has_calls([call()] * get_holders.call_count) self.assertEqual(idle_connection.in_flight, 0) self.assertEqual(non_idle_connection.in_flight, 0) idle_connection.send_msg.assert_has_calls([call(ANY, request_id, ANY)] * get_holders.call_count) self.assertEqual(non_idle_connection.send_msg.call_count, 0) def test_closed_defunct(self, *args): get_holders = self.make_get_holders(1) closed_connection = Mock(spec=Connection, in_flight=0, is_idle=False, is_defunct=False, is_closed=True) defunct_connection = Mock(spec=Connection, in_flight=0, is_idle=False, is_defunct=True, is_closed=False) holder = get_holders.return_value[0] holder.get_connections.return_value.append(closed_connection) holder.get_connections.return_value.append(defunct_connection) self.run_heartbeat(get_holders) holder.get_connections.assert_has_calls([call()] * get_holders.call_count) self.assertEqual(closed_connection.in_flight, 0) self.assertEqual(defunct_connection.in_flight, 0) self.assertEqual(closed_connection.send_msg.call_count, 0) self.assertEqual(defunct_connection.send_msg.call_count, 0) def test_no_req_ids(self, *args): in_flight = 3 get_holders = self.make_get_holders(1) max_connection = Mock(spec=Connection, host='localhost', lock=Lock(), max_request_id=in_flight, in_flight=in_flight, is_idle=True, is_defunct=False, is_closed=False) holder = get_holders.return_value[0] holder.get_connections.return_value.append(max_connection) self.run_heartbeat(get_holders) holder.get_connections.assert_has_calls([call()] * get_holders.call_count) self.assertEqual(max_connection.in_flight, in_flight) 
self.assertEqual(max_connection.send_msg.call_count, 0) self.assertEqual(max_connection.send_msg.call_count, 0) max_connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) holder.return_connection.assert_has_calls([call(max_connection)] * get_holders.call_count) def test_unexpected_response(self, *args): request_id = 999 get_holders = self.make_get_holders(1) def send_msg(msg, req_id, msg_callback): msg_callback(object()) connection = Mock(spec=Connection, host='localhost', max_request_id=127, lock=Lock(), in_flight=0, is_idle=True, is_defunct=False, is_closed=False, get_request_id=lambda: request_id, send_msg=Mock(side_effect=send_msg)) holder = get_holders.return_value[0] holder.get_connections.return_value.append(connection) self.run_heartbeat(get_holders) self.assertEqual(connection.in_flight, get_holders.call_count) connection.send_msg.assert_has_calls([call(ANY, request_id, ANY)] * get_holders.call_count) connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) exc = connection.defunct.call_args_list[0][0][0] self.assertIsInstance(exc, Exception) self.assertEqual(exc.args, Exception('Connection heartbeat failure').args) holder.return_connection.assert_has_calls([call(connection)] * get_holders.call_count) def test_timeout(self, *args): request_id = 999 get_holders = self.make_get_holders(1) def send_msg(msg, req_id, msg_callback): pass connection = Mock(spec=Connection, host='localhost', max_request_id=127, lock=Lock(), in_flight=0, is_idle=True, is_defunct=False, is_closed=False, get_request_id=lambda: request_id, send_msg=Mock(side_effect=send_msg)) holder = get_holders.return_value[0] holder.get_connections.return_value.append(connection) self.run_heartbeat(get_holders) self.assertEqual(connection.in_flight, get_holders.call_count) connection.send_msg.assert_has_calls([call(ANY, request_id, ANY)] * get_holders.call_count) connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) exc = 
connection.defunct.call_args_list[0][0][0] self.assertIsInstance(exc, Exception) self.assertEqual(exc.args, Exception('Connection heartbeat failure').args) holder.return_connection.assert_has_calls([call(connection)] * get_holders.call_count)
apache-2.0
Carmezim/tensorflow
tensorflow/contrib/legacy_seq2seq/__init__.py
165
2433
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Deprecated library for creating sequence-to-sequence models in TensorFlow. @@attention_decoder @@basic_rnn_seq2seq @@embedding_attention_decoder @@embedding_attention_seq2seq @@embedding_rnn_decoder @@embedding_rnn_seq2seq @@embedding_tied_rnn_seq2seq @@model_with_buckets @@one2many_rnn_seq2seq @@rnn_decoder @@sequence_loss @@sequence_loss_by_example @@tied_rnn_seq2seq """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import attention_decoder from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import basic_rnn_seq2seq from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_attention_decoder from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_attention_seq2seq from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_rnn_decoder from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_rnn_seq2seq from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_tied_rnn_seq2seq from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import model_with_buckets from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import one2many_rnn_seq2seq from 
tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import rnn_decoder from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import sequence_loss from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import sequence_loss_by_example from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import tied_rnn_seq2seq from tensorflow.python.util.all_util import remove_undocumented _allowed_symbols = [] remove_undocumented(__name__, _allowed_symbols)
apache-2.0
sanjuro/RCJK
vendor/django/utils/html.py
116
7418
"""HTML utilities suitable for global use.""" import re import string from django.utils.safestring import SafeData, mark_safe from django.utils.encoding import force_unicode from django.utils.functional import allow_lazy from django.utils.http import urlquote # Configuration for urlize() function. LEADING_PUNCTUATION = ['(', '<', '&lt;'] TRAILING_PUNCTUATION = ['.', ',', ')', '>', '\n', '&gt;'] # List of possible strings used for bullets in bulleted lists. DOTS = ['&middot;', '*', '\xe2\x80\xa2', '&#149;', '&bull;', '&#8226;'] unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)') word_split_re = re.compile(r'(\s+)') punctuation_re = re.compile('^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % \ ('|'.join([re.escape(x) for x in LEADING_PUNCTUATION]), '|'.join([re.escape(x) for x in TRAILING_PUNCTUATION]))) simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$') link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+') html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE) hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL) trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z') del x # Temporary variable def escape(html): """ Returns the given HTML with ampersands, quotes and angle brackets encoded. """ return mark_safe(force_unicode(html).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')) escape = allow_lazy(escape, unicode) def conditional_escape(html): """ Similar to escape(), except that it doesn't operate on pre-escaped strings. 
""" if isinstance(html, SafeData): return html else: return escape(html) def linebreaks(value, autoescape=False): """Converts newlines into <p> and <br />s.""" value = re.sub(r'\r\n|\r|\n', '\n', force_unicode(value)) # normalize newlines paras = re.split('\n{2,}', value) if autoescape: paras = [u'<p>%s</p>' % escape(p).replace('\n', '<br />') for p in paras] else: paras = [u'<p>%s</p>' % p.replace('\n', '<br />') for p in paras] return u'\n\n'.join(paras) linebreaks = allow_lazy(linebreaks, unicode) def strip_tags(value): """Returns the given HTML with all tags stripped.""" return re.sub(r'<[^>]*?>', '', force_unicode(value)) strip_tags = allow_lazy(strip_tags) def strip_spaces_between_tags(value): """Returns the given HTML with spaces between tags removed.""" return re.sub(r'>\s+<', '><', force_unicode(value)) strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, unicode) def strip_entities(value): """Returns the given HTML with all entities (&something;) stripped.""" return re.sub(r'&(?:\w+|#\d+);', '', force_unicode(value)) strip_entities = allow_lazy(strip_entities, unicode) def fix_ampersands(value): """Returns the given HTML with all unencoded ampersands encoded correctly.""" return unencoded_ampersands_re.sub('&amp;', force_unicode(value)) fix_ampersands = allow_lazy(fix_ampersands, unicode) def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False): """ Converts any URLs in text into clickable links. Works on http://, https://, www. links and links ending in .org, .net or .com. Links can have trailing punctuation (periods, commas, close-parens) and leading punctuation (opening parens) and it'll still do the right thing. If trim_url_limit is not None, the URLs in link text longer than this limit will truncated to trim_url_limit-3 characters and appended with an elipsis. If nofollow is True, the URLs in link text will get a rel="nofollow" attribute. If autoescape is True, the link text and URLs will get autoescaped. 
""" trim_url = lambda x, limit=trim_url_limit: limit is not None and (len(x) > limit and ('%s...' % x[:max(0, limit - 3)])) or x safe_input = isinstance(text, SafeData) words = word_split_re.split(force_unicode(text)) nofollow_attr = nofollow and ' rel="nofollow"' or '' for i, word in enumerate(words): match = None if '.' in word or '@' in word or ':' in word: match = punctuation_re.match(word) if match: lead, middle, trail = match.groups() # Make URL we want to point to. url = None if middle.startswith('http://') or middle.startswith('https://'): url = urlquote(middle, safe='/&=:;#?+*') elif middle.startswith('www.') or ('@' not in middle and \ middle and middle[0] in string.ascii_letters + string.digits and \ (middle.endswith('.org') or middle.endswith('.net') or middle.endswith('.com'))): url = urlquote('http://%s' % middle, safe='/&=:;#?+*') elif '@' in middle and not ':' in middle and simple_email_re.match(middle): url = 'mailto:%s' % middle nofollow_attr = '' # Make link. if url: trimmed = trim_url(middle) if autoescape and not safe_input: lead, trail = escape(lead), escape(trail) url, trimmed = escape(url), escape(trimmed) middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed) words[i] = mark_safe('%s%s%s' % (lead, middle, trail)) else: if safe_input: words[i] = mark_safe(word) elif autoescape: words[i] = escape(word) elif safe_input: words[i] = mark_safe(word) elif autoescape: words[i] = escape(word) return u''.join(words) urlize = allow_lazy(urlize, unicode) def clean_html(text): """ Clean the given HTML. Specifically, do the following: * Convert <b> and <i> to <strong> and <em>. * Encode all ampersands correctly. * Remove all "target" attributes from <a> tags. * Remove extraneous HTML, such as presentational tags that open and immediately close and <br clear="all">. * Convert hard-coded bullets into HTML unordered lists. * Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the bottom of the text. 
""" from django.utils.text import normalize_newlines text = normalize_newlines(force_unicode(text)) text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text) text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text) text = fix_ampersands(text) # Remove all target="" attributes from <a> tags. text = link_target_attribute_re.sub('\\1', text) # Trim stupid HTML such as <br clear="all">. text = html_gunk_re.sub('', text) # Convert hard-coded bullets into HTML unordered lists. def replace_p_tags(match): s = match.group().replace('</p>', '</li>') for d in DOTS: s = s.replace('<p>%s' % d, '<li>') return u'<ul>\n%s\n</ul>' % s text = hard_coded_bullets_re.sub(replace_p_tags, text) # Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the bottom # of the text. text = trailing_empty_content_re.sub('', text) return text clean_html = allow_lazy(clean_html, unicode)
apache-2.0
thirdwing/SFrame
oss_src/unity/python/sframe/meta/decompiler/tests/test_comprehensions.py
15
5000
class ListComprehension(Base):
    """Round-trip tests for decompiling list-comprehension bytecode.

    Each test compiles the statement in ``stmnt`` and asserts (via
    ``Base.statement``) that the decompiler reproduces equivalent source.
    """

    def test_comp1(self):
        stmnt = '[a for b in c]'
        self.statement(stmnt)

    def test_comp2(self):
        stmnt = '[a() +1 for b in c]'
        self.statement(stmnt)

    def test_comp3(self):
        stmnt = 'y = [a() +1 for b in c]'
        self.statement(stmnt)

    def test_comp_ifs(self):
        stmnt = 'y = [a() +1 for b in c if asdf]'
        self.statement(stmnt)

    def test_comp_ifs1(self):
        stmnt = 'y = [a() +1 for b in c if asdf if asd]'
        self.statement(stmnt)

    def test_comp_ifs2(self):
        stmnt = 'y = [a() +1 for b in c if asdf if not asd]'
        self.statement(stmnt)

    # Comprehensions with more than one ``for`` clause are not yet handled
    # by the decompiler, hence the expected failures below.
    @unittest.expectedFailure
    def test_multi_comp1(self):
        stmnt = '[a for b in c for d in e]'
        self.statement(stmnt)

    @unittest.expectedFailure
    def test_multi_comp2(self):
        stmnt = '[a() +1 for b in c for d in e]'
        self.statement(stmnt)

    @unittest.expectedFailure
    def test_multi_comp3(self):
        stmnt = 'y = [a() +1 for b in c for d in e]'
        self.statement(stmnt)

    @unittest.expectedFailure
    def test_multi_comp_ifs(self):
        stmnt = 'y = [a() +1 for b in c if asdf for d in e]'
        self.statement(stmnt)

    @unittest.expectedFailure
    def test_multi_comp_ifs1(self):
        stmnt = 'y = [a() +1 for b in c if asdf if asd for d in e if this]'
        self.statement(stmnt)

    @unittest.expectedFailure
    def test_multi_comp_ifs2(self):
        stmnt = 'y = [a() +1 for b in c for d in e if adsf]'
        self.statement(stmnt)
'{a for b in c for d in e}' self.statement(stmnt) @unittest.expectedFailure def test_multi_comp2(self): stmnt = '{a() +1 for b in c for d in e}' self.statement(stmnt) @unittest.expectedFailure def test_multi_comp3(self): stmnt = 'y = {a() +1 for b in c for d in e}' self.statement(stmnt) @unittest.expectedFailure def test_multi_comp_ifs(self): stmnt = 'y = {a() +1 for b in c if asdf for d in e}' self.statement(stmnt) @unittest.expectedFailure def test_multi_comp_ifs1(self): stmnt = 'y = {a() +1 for b in c if asdf if asd for d in e if this}' self.statement(stmnt) @unittest.expectedFailure def test_multi_comp_ifs2(self): stmnt = 'y = {a() +1 for b in c for d in e if adsf}' self.statement(stmnt) class DictComprehension(Base): def test_comp1(self): stmnt = '{a:q for b in c}' self.statement(stmnt) def test_comp2(self): stmnt = '{a() +1:q for b in c}' self.statement(stmnt) def test_comp3(self): stmnt = 'y = {a() +1:q for b in c}' self.statement(stmnt) def test_comp_ifs(self): stmnt = 'y = {a() +1:q for b in c if asdf}' self.statement(stmnt) def test_comp_ifs1(self): stmnt = 'y = {a() +1:q for b in c if asdf if asd}' self.statement(stmnt) def test_comp_ifs2(self): stmnt = 'y = {a() +1:q for b in c if asdf if not asd}' self.statement(stmnt) @unittest.expectedFailure def test_multi_comp1(self): stmnt = '{a:q for b in c for d in e}' self.statement(stmnt) @unittest.expectedFailure def test_multi_comp2(self): stmnt = '{a():q +1 for b in c for d in e}' self.statement(stmnt) @unittest.expectedFailure def test_multi_comp3(self): stmnt = 'y = {a() +1:q for b in c for d in e}' self.statement(stmnt) @unittest.expectedFailure def test_multi_comp_ifs(self): stmnt = 'y = {a() +1:q for b in c if asdf for d in e}' self.statement(stmnt) @unittest.expectedFailure def test_multi_comp_ifs1(self): stmnt = 'y = {a() +1:q for b in c if asdf if asd for d in e if this}' self.statement(stmnt) @unittest.expectedFailure def test_multi_comp_ifs2(self): stmnt = 'y = {a() +1:q for b in c for d in e if 
adsf}' self.statement(stmnt) if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.testName'] unittest.main()
bsd-3-clause
leifurhauks/grpc
tools/gcp/stress_test/run_server.py
37
5970
#!/usr/bin/env python2.7 # Copyright 2015-2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import datetime import os import resource import select import subprocess import sys import time from stress_test_utils import BigQueryHelper from stress_test_utils import EventType def run_server(): """This is a wrapper around the interop server and performs the following: 1) Create a 'Summary table' in Big Query to record events like the server started, completed successfully or failed. 
NOTE: This also creates another table called the QPS table which is currently NOT needed on the server (it is needed on the stress test clients) 2) Start the server process and add a row in Big Query summary table 3) Wait for the server process to terminate. The server process does not terminate unless there is an error. If the server process terminated with a failure, add a row in Big Query and wait forever. NOTE: This script typically runs inside a GKE pod which means that the pod gets destroyed when the script exits. However, in case the server process fails, we would not want the pod to be destroyed (since we might want to connect to the pod for examining logs). This is the reason why the script waits forever in case of failures. """ # Set the 'core file' size to 'unlimited' so that 'core' files are generated # if the server crashes (Note: This is not relevant for Java and Go servers) resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)) # Read the parameters from environment variables env = dict(os.environ) run_id = env['RUN_ID'] # The unique run id for this test image_type = env['STRESS_TEST_IMAGE_TYPE'] stress_server_cmd = env['STRESS_TEST_CMD'].split() args_str = env['STRESS_TEST_ARGS_STR'] pod_name = env['POD_NAME'] project_id = env['GCP_PROJECT_ID'] dataset_id = env['DATASET_ID'] summary_table_id = env['SUMMARY_TABLE_ID'] qps_table_id = env['QPS_TABLE_ID'] # The following parameter is to inform us whether the server runs forever # until forcefully stopped or will it naturally stop after sometime. # This way, we know that the process should not terminate (even if it does # with a success exit code) and flag any termination as a failure. 
will_run_forever = env.get('WILL_RUN_FOREVER', '1') logfile_name = env.get('LOGFILE_NAME') print('pod_name: %s, project_id: %s, run_id: %s, dataset_id: %s, ' 'summary_table_id: %s, qps_table_id: %s') % (pod_name, project_id, run_id, dataset_id, summary_table_id, qps_table_id) bq_helper = BigQueryHelper(run_id, image_type, pod_name, project_id, dataset_id, summary_table_id, qps_table_id) bq_helper.initialize() # Create BigQuery Dataset and Tables: Summary Table and Metrics Table if not bq_helper.setup_tables(): print 'Error in creating BigQuery tables' return start_time = datetime.datetime.now() logfile = None details = 'Logging to stdout' if logfile_name is not None: print 'Opening log file: ', logfile_name logfile = open(logfile_name, 'w') details = 'Logfile: %s' % logfile_name stress_cmd = stress_server_cmd + [x for x in args_str.split()] details = '%s, Stress server command: %s' % (details, str(stress_cmd)) # Update status that the test is starting (in the status table) bq_helper.insert_summary_row(EventType.STARTING, details) print 'Launching process %s ...' % stress_cmd stress_p = subprocess.Popen(args=stress_cmd, stdout=logfile, stderr=subprocess.STDOUT) # Update the status to running if subprocess.Popen launched the server if stress_p.poll() is None: bq_helper.insert_summary_row(EventType.RUNNING, '') # Wait for the server process to terminate returncode = stress_p.wait() if will_run_forever == '1' or returncode != 0: end_time = datetime.datetime.now().isoformat() event_type = EventType.FAILURE details = 'Returncode: %d; End time: %s' % (returncode, end_time) bq_helper.insert_summary_row(event_type, details) print 'Waiting indefinitely..' select.select([], [], []) return returncode if __name__ == '__main__': run_server()
bsd-3-clause
Wynjones1/psemu
deps/googletest/googletest/test/gtest_list_tests_unittest.py
1898
6515
#!/usr/bin/env python # # Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test's --gtest_list_tests flag. A user can ask Google Test to list all tests by specifying the --gtest_list_tests flag. This script tests such functionality by invoking gtest_list_tests_unittest_ (a program written with Google Test) the command line flags. """ __author__ = 'phanna@google.com (Patrick Hanna)' import gtest_test_utils import re # Constants. # The command line flag for enabling/disabling listing all tests. 
def Run(args):
  """Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
  command = [EXE_PATH] + args
  process = gtest_test_utils.Subprocess(command, capture_stderr=False)
  return process.output
expected_output_re: regular expression that matches the expected output after running command; other_flag: a different flag to be passed to command along with gtest_list_tests; None if the flag should not be present. """ if flag_value is None: flag = '' flag_expression = 'not set' elif flag_value == '0': flag = '--%s=0' % LIST_TESTS_FLAG flag_expression = '0' else: flag = '--%s' % LIST_TESTS_FLAG flag_expression = '1' args = [flag] if other_flag is not None: args += [other_flag] output = Run(args) if expected_output_re: self.assert_( expected_output_re.match(output), ('when %s is %s, the output of "%s" is "%s",\n' 'which does not match regex "%s"' % (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output, expected_output_re.pattern))) else: self.assert_( not EXPECTED_OUTPUT_NO_FILTER_RE.match(output), ('when %s is %s, the output of "%s" is "%s"'% (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output))) def testDefaultBehavior(self): """Tests the behavior of the default mode.""" self.RunAndVerify(flag_value=None, expected_output_re=None, other_flag=None) def testFlag(self): """Tests using the --gtest_list_tests flag.""" self.RunAndVerify(flag_value='0', expected_output_re=None, other_flag=None) self.RunAndVerify(flag_value='1', expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE, other_flag=None) def testOverrideNonFilterFlags(self): """Tests that --gtest_list_tests overrides the non-filter flags.""" self.RunAndVerify(flag_value='1', expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE, other_flag='--gtest_break_on_failure') def testWithFilterFlags(self): """Tests that --gtest_list_tests takes into account the --gtest_filter flag.""" self.RunAndVerify(flag_value='1', expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE, other_flag='--gtest_filter=Foo*') if __name__ == '__main__': gtest_test_utils.Main()
mit
joshwatson/binaryninja-api
python/examples/kaitai/gran_turismo_vol.py
1
5921
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild from pkg_resources import parse_version from .kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO import collections if parse_version(ks_version) < parse_version('0.7'): raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version)) class GranTurismoVol(KaitaiStruct): SEQ_FIELDS = ["magic", "num_files", "num_entries", "reserved", "offsets"] def __init__(self, _io, _parent=None, _root=None): self._io = _io self._parent = _parent self._root = _root if _root else self self._debug = collections.defaultdict(dict) def _read(self): self._debug['magic']['start'] = self._io.pos() self.magic = self._io.ensure_fixed_contents(b"\x47\x54\x46\x53\x00\x00\x00\x00") self._debug['magic']['end'] = self._io.pos() self._debug['num_files']['start'] = self._io.pos() self.num_files = self._io.read_u2le() self._debug['num_files']['end'] = self._io.pos() self._debug['num_entries']['start'] = self._io.pos() self.num_entries = self._io.read_u2le() self._debug['num_entries']['end'] = self._io.pos() self._debug['reserved']['start'] = self._io.pos() self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00") self._debug['reserved']['end'] = self._io.pos() self._debug['offsets']['start'] = self._io.pos() self.offsets = [None] * (self.num_files) for i in range(self.num_files): if not 'arr' in self._debug['offsets']: self._debug['offsets']['arr'] = [] self._debug['offsets']['arr'].append({'start': self._io.pos()}) self.offsets[i] = self._io.read_u4le() self._debug['offsets']['arr'][i]['end'] = self._io.pos() self._debug['offsets']['end'] = self._io.pos() class FileInfo(KaitaiStruct): SEQ_FIELDS = ["timestamp", "offset_idx", "flags", "name"] def __init__(self, _io, _parent=None, _root=None): self._io = _io self._parent = _parent self._root = _root if _root else self self._debug = collections.defaultdict(dict) 
def _read(self): self._debug['timestamp']['start'] = self._io.pos() self.timestamp = self._io.read_u4le() self._debug['timestamp']['end'] = self._io.pos() self._debug['offset_idx']['start'] = self._io.pos() self.offset_idx = self._io.read_u2le() self._debug['offset_idx']['end'] = self._io.pos() self._debug['flags']['start'] = self._io.pos() self.flags = self._io.read_u1() self._debug['flags']['end'] = self._io.pos() self._debug['name']['start'] = self._io.pos() self.name = (KaitaiStream.bytes_terminate(KaitaiStream.bytes_strip_right(self._io.read_bytes(25), 0), 0, False)).decode(u"ASCII") self._debug['name']['end'] = self._io.pos() @property def size(self): if hasattr(self, '_m_size'): return self._m_size if hasattr(self, '_m_size') else None self._m_size = ((self._root.offsets[(self.offset_idx + 1)] & 4294965248) - self._root.offsets[self.offset_idx]) return self._m_size if hasattr(self, '_m_size') else None @property def body(self): if hasattr(self, '_m_body'): return self._m_body if hasattr(self, '_m_body') else None if not (self.is_dir): _pos = self._io.pos() self._io.seek((self._root.offsets[self.offset_idx] & 4294965248)) self._debug['_m_body']['start'] = self._io.pos() self._m_body = self._io.read_bytes(self.size) self._debug['_m_body']['end'] = self._io.pos() self._io.seek(_pos) return self._m_body if hasattr(self, '_m_body') else None @property def is_dir(self): if hasattr(self, '_m_is_dir'): return self._m_is_dir if hasattr(self, '_m_is_dir') else None self._m_is_dir = (self.flags & 1) != 0 return self._m_is_dir if hasattr(self, '_m_is_dir') else None @property def is_last_entry(self): if hasattr(self, '_m_is_last_entry'): return self._m_is_last_entry if hasattr(self, '_m_is_last_entry') else None self._m_is_last_entry = (self.flags & 128) != 0 return self._m_is_last_entry if hasattr(self, '_m_is_last_entry') else None @property def ofs_dir(self): if hasattr(self, '_m_ofs_dir'): return self._m_ofs_dir if hasattr(self, '_m_ofs_dir') else None 
self._m_ofs_dir = self.offsets[1] return self._m_ofs_dir if hasattr(self, '_m_ofs_dir') else None @property def files(self): if hasattr(self, '_m_files'): return self._m_files if hasattr(self, '_m_files') else None _pos = self._io.pos() self._io.seek((self.ofs_dir & 4294965248)) self._debug['_m_files']['start'] = self._io.pos() self._m_files = [None] * (self._root.num_entries) for i in range(self._root.num_entries): if not 'arr' in self._debug['_m_files']: self._debug['_m_files']['arr'] = [] self._debug['_m_files']['arr'].append({'start': self._io.pos()}) _t__m_files = self._root.FileInfo(self._io, self, self._root) _t__m_files._read() self._m_files[i] = _t__m_files self._debug['_m_files']['arr'][i]['end'] = self._io.pos() self._debug['_m_files']['end'] = self._io.pos() self._io.seek(_pos) return self._m_files if hasattr(self, '_m_files') else None
mit
nlloyd/SubliminalCollaborator
libs/twisted/python/test/test_syslog.py
81
4948
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. from twisted.trial.unittest import TestCase from twisted.python.failure import Failure try: import syslog as stdsyslog except ImportError: stdsyslog = None else: from twisted.python import syslog class SyslogObserverTests(TestCase): """ Tests for L{SyslogObserver} which sends Twisted log events to the syslog. """ events = None if stdsyslog is None: skip = "syslog is not supported on this platform" def setUp(self): self.patch(syslog.SyslogObserver, 'openlog', self.openlog) self.patch(syslog.SyslogObserver, 'syslog', self.syslog) self.observer = syslog.SyslogObserver('SyslogObserverTests') def openlog(self, prefix, options, facility): self.logOpened = (prefix, options, facility) self.events = [] def syslog(self, options, message): self.events.append((options, message)) def test_emitWithoutMessage(self): """ L{SyslogObserver.emit} ignores events with an empty value for the C{'message'} key. """ self.observer.emit({'message': (), 'isError': False, 'system': '-'}) self.assertEqual(self.events, []) def test_emitCustomPriority(self): """ L{SyslogObserver.emit} uses the value of the C{'syslogPriority'} as the syslog priority, if that key is present in the event dictionary. """ self.observer.emit({ 'message': ('hello, world',), 'isError': False, 'system': '-', 'syslogPriority': stdsyslog.LOG_DEBUG}) self.assertEqual( self.events, [(stdsyslog.LOG_DEBUG, '[-] hello, world')]) def test_emitErrorPriority(self): """ L{SyslogObserver.emit} uses C{LOG_ALERT} if the event represents an error. """ self.observer.emit({ 'message': ('hello, world',), 'isError': True, 'system': '-', 'failure': Failure(Exception("foo"))}) self.assertEqual( self.events, [(stdsyslog.LOG_ALERT, '[-] hello, world')]) def test_emitCustomPriorityOverridesError(self): """ L{SyslogObserver.emit} uses the value of the C{'syslogPriority'} key if it is specified even if the event dictionary represents an error. 
""" self.observer.emit({ 'message': ('hello, world',), 'isError': True, 'system': '-', 'syslogPriority': stdsyslog.LOG_NOTICE, 'failure': Failure(Exception("bar"))}) self.assertEqual( self.events, [(stdsyslog.LOG_NOTICE, '[-] hello, world')]) def test_emitCustomFacility(self): """ L{SyslogObserver.emit} uses the value of the C{'syslogPriority'} as the syslog priority, if that key is present in the event dictionary. """ self.observer.emit({ 'message': ('hello, world',), 'isError': False, 'system': '-', 'syslogFacility': stdsyslog.LOG_CRON}) self.assertEqual( self.events, [(stdsyslog.LOG_INFO | stdsyslog.LOG_CRON, '[-] hello, world')]) def test_emitCustomSystem(self): """ L{SyslogObserver.emit} uses the value of the C{'system'} key to prefix the logged message. """ self.observer.emit({'message': ('hello, world',), 'isError': False, 'system': 'nonDefaultSystem'}) self.assertEqual( self.events, [(stdsyslog.LOG_INFO, "[nonDefaultSystem] hello, world")]) def test_emitMessage(self): """ L{SyslogObserver.emit} logs the value of the C{'message'} key of the event dictionary it is passed to the syslog. """ self.observer.emit({ 'message': ('hello, world',), 'isError': False, 'system': '-'}) self.assertEqual( self.events, [(stdsyslog.LOG_INFO, "[-] hello, world")]) def test_emitMultilineMessage(self): """ Each line of a multiline message is emitted separately to the syslog. """ self.observer.emit({ 'message': ('hello,\nworld',), 'isError': False, 'system': '-'}) self.assertEqual( self.events, [(stdsyslog.LOG_INFO, '[-] hello,'), (stdsyslog.LOG_INFO, '[-] \tworld')]) def test_emitStripsTrailingEmptyLines(self): """ Trailing empty lines of a multiline message are omitted from the messages sent to the syslog. """ self.observer.emit({ 'message': ('hello,\nworld\n\n',), 'isError': False, 'system': '-'}) self.assertEqual( self.events, [(stdsyslog.LOG_INFO, '[-] hello,'), (stdsyslog.LOG_INFO, '[-] \tworld')])
apache-2.0
vedujoshi/os_tempest
tempest/api/object_storage/test_object_formpost.py
4
4657
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Christian Schwede <christian.schwede@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import hashlib
import hmac
import time
import urlparse

from tempest.api.object_storage import base
from tempest.common.utils import data_utils
from tempest import test


class ObjectFormPostTest(base.BaseObjectTest):
    """Test uploading an object via the Swift 'formpost' middleware."""

    # Account metadata holding the Temp-URL signing key; removed again in
    # tearDownClass.
    metadata = {}
    # Containers created by this class, cleaned up in tearDownClass.
    containers = []

    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        super(ObjectFormPostTest, cls).setUpClass()
        cls.container_name = data_utils.rand_name(name='TestContainer')
        cls.object_name = data_utils.rand_name(name='ObjectTemp')

        cls.container_client.create_container(cls.container_name)
        cls.containers = [cls.container_name]

        # Key used to sign the multipart form (Swift Temp-URL-Key).
        cls.key = 'Meta'
        cls.metadata = {'Temp-URL-Key': cls.key}
        cls.account_client.create_account_metadata(metadata=cls.metadata)

    def setUp(self):
        super(ObjectFormPostTest, self).setUp()

        # make sure the metadata has been set
        account_client_metadata, _ = \
            self.account_client.list_account_metadata()
        self.assertIn('x-account-meta-temp-url-key',
                      account_client_metadata)
        self.assertEqual(
            account_client_metadata['x-account-meta-temp-url-key'],
            self.key)

    @classmethod
    def tearDownClass(cls):
        # Undo everything setUpClass created: account metadata, containers,
        # and any auxiliary test data.
        cls.account_client.delete_account_metadata(metadata=cls.metadata)
        cls.delete_containers(cls.containers)
        cls.data.teardown_all()
        super(ObjectFormPostTest, cls).tearDownClass()

    def get_multipart_form(self, expires=600):
        """Build a signed multipart/form-data request body.

        :param expires: lifetime of the signature in seconds from now
        :returns: (body, content_type) tuple ready to POST; the signature
            is an HMAC-SHA1 over path, redirect, max_file_size,
            max_file_count and expires, keyed with the Temp-URL key.
        """
        path = "%s/%s/%s" % (
            urlparse.urlparse(self.container_client.base_url).path,
            self.container_name,
            self.object_name)

        redirect = ''
        max_file_size = 104857600
        max_file_count = 10
        expires += int(time.time())
        # The HMAC body layout is mandated by the Swift formpost middleware.
        hmac_body = '%s\n%s\n%s\n%s\n%s' % (path,
                                            redirect,
                                            max_file_size,
                                            max_file_count,
                                            expires)

        signature = hmac.new(self.key, hmac_body, hashlib.sha1).hexdigest()

        fields = {'redirect': redirect,
                  'max_file_size': str(max_file_size),
                  'max_file_count': str(max_file_count),
                  'expires': str(expires),
                  'signature': signature}

        boundary = '--boundary--'
        data = []
        # One form-data part per signed field ...
        for (key, value) in fields.items():
            data.append('--' + boundary)
            data.append('Content-Disposition: form-data; name="%s"' % key)
            data.append('')
            data.append(value)

        # ... plus the file part carrying the actual object payload.
        data.append('--' + boundary)
        data.append('Content-Disposition: form-data; '
                    'name="file1"; filename="testfile"')
        data.append('Content-Type: application/octet-stream')
        data.append('')
        data.append('hello world')
        data.append('--' + boundary + '--')
        data.append('')

        body = '\r\n'.join(data)
        content_type = 'multipart/form-data; boundary=%s' % boundary
        return body, content_type

    @test.requires_ext(extension='formpost', service='object')
    @test.attr(type='gate')
    def test_post_object_using_form(self):
        body, content_type = self.get_multipart_form()

        headers = {'Content-Type': content_type,
                   'Content-Length': str(len(body))}

        url = "%s/%s" % (self.container_name, self.object_name)
        resp, body = self.object_client.post(url, body, headers=headers)
        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
        self.assertHeaders(resp, "Object", "POST")

        # Ensure object is available
        # NOTE(review): formpost appends the uploaded filename to the object
        # name, hence the "testfile" suffix on the GET path.
        resp, body = self.object_client.get("%s/%s%s" % (
            self.container_name, self.object_name, "testfile"))
        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
        self.assertHeaders(resp, "Object", "GET")
        self.assertEqual(body, "hello world")
apache-2.0
AOKP/kernel_htc_m7
tools/perf/scripts/python/syscall-counts-by-pid.py
11180
1927
# system call counts, by pid # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide system call totals, broken down by syscall. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os, sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import syscall_name usage = "perf script -s syscall-counts-by-pid.py [comm]\n"; for_comm = None for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: try: for_pid = int(sys.argv[1]) except: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_syscall_totals() def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if (for_comm and common_comm != for_comm) or \ (for_pid and common_pid != for_pid ): return try: syscalls[common_comm][common_pid][id] += 1 except TypeError: syscalls[common_comm][common_pid][id] = 1 def print_syscall_totals(): if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events by comm/pid:\n\n", print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "----------"), comm_keys = syscalls.keys() for comm in comm_keys: pid_keys = syscalls[comm].keys() for pid in pid_keys: print "\n%s [%d]\n" % (comm, pid), id_keys = syscalls[comm][pid].keys() for id, val in sorted(syscalls[comm][pid].iteritems(), \ key = lambda(k, v): (v, k), reverse = True): print " %-38s %10d\n" % (syscall_name(id), val),
gpl-2.0
joone/chromium-crosswalk
tools/telemetry/telemetry/value/failure.py
23
2670
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import sys
import traceback

from telemetry import value as value_module


class FailureValue(value_module.Value):

  def __init__(self, page, exc_info, description=None, tir_label=None):
    """A value representing a failure when running the page.

    Args:
      page: The page where this failure occurs.
      exc_info: The exception info (sys.exc_info()) corresponding to this
          failure.
      description: Optional human-readable description, passed through to
          the Value base class.
      tir_label: Optional interaction-record label, passed through to the
          Value base class.
    """
    # The exception class name doubles as the value's name; units are empty
    # and 'important' is always True for failures.
    exc_type = exc_info[0].__name__
    super(FailureValue, self).__init__(page, exc_type, '', True, description,
                                       tir_label)
    self._exc_info = exc_info

  @classmethod
  def FromMessage(cls, page, message):
    """Creates a failure value for a given string message.

    Args:
      page: The page where this failure occurs.
      message: A string message describing the failure.
    """
    exc_info = cls._GetExcInfoFromMessage(message)
    return FailureValue(page, exc_info)

  @staticmethod
  def _GetExcInfoFromMessage(message):
    # Manufacture a real exc_info triple (with traceback) by raising and
    # immediately catching an Exception built from the message.
    try:
      raise Exception(message)
    except Exception:
      return sys.exc_info()

  def __repr__(self):
    if self.page:
      page_name = self.page.display_name
    else:
      page_name = 'None'
    return 'FailureValue(%s, %s)' % (
        page_name, GetStringFromExcInfo(self._exc_info))

  @property
  def exc_info(self):
    return self._exc_info

  # Failures have no buildbot/chart representation, hence the None returns
  # from the following four accessors.
  def GetBuildbotDataType(self, output_context):
    return None

  def GetBuildbotValue(self):
    return None

  def GetChartAndTraceNameForPerPageResult(self):
    return None

  def GetRepresentativeNumber(self):
    return None

  def GetRepresentativeString(self):
    return None

  @staticmethod
  def GetJSONTypeName():
    return 'failure'

  def AsDict(self):
    # Serialize as the base Value dict plus the formatted traceback string.
    d = super(FailureValue, self).AsDict()
    d['value'] = GetStringFromExcInfo(self.exc_info)
    return d

  @staticmethod
  def FromDict(value_dict, page_dict):
    # Rebuild a FailureValue from its AsDict() form; name/units/important
    # are derived in __init__, so they must not be passed through.
    kwargs = value_module.Value.GetConstructorKwArgs(value_dict, page_dict)
    del kwargs['name']
    del kwargs['units']
    if 'important' in kwargs:
      del kwargs['important']
    kwargs['exc_info'] = FailureValue._GetExcInfoFromMessage(
        value_dict['value'])
    return FailureValue(**kwargs)

  @classmethod
  def MergeLikeValuesFromSamePage(cls, values):
    assert False, 'Should not be called.'

  @classmethod
  def MergeLikeValuesFromDifferentPages(cls, values):
    assert False, 'Should not be called.'


def GetStringFromExcInfo(exc_info):
  """Return the formatted traceback for an exc_info triple as one string."""
  return ''.join(traceback.format_exception(*exc_info))
bsd-3-clause
gramps-project/gramps
gramps/test/regrtest.py
5
5751
#! /usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2005  Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# test/regrtest.py
# Original: RunAllTests.py Written by Richard Taylor
# (jgs: revised for embedded "test" subdirs as regrtest.py )

""" Testing framework for performing a variety of unittests for Gramps.
"""

# TODO: review whether logging is really useful for unittest
#  it does seem to work .. try -v5
import logging

import os
import sys
import unittest
from optparse import OptionParser

from .test import test_util as tu
gramps_root=tu.path_append_parent()

def make_parser():
    """Build the OptionParser for verbosity and performance flags."""
    usage = "usage: %prog [options]"
    parser = OptionParser(usage)
    parser.add_option("-v", "--verbosity", type="int",
                      dest="verbose_level", default=0,
                      help="Level of verboseness")
    parser.add_option("-p", "--performance", action="store_true",
                      dest="performance", default=False,
                      help="Run the performance tests.")
    return parser


def getTestSuites(loc=gramps_root):
    """Collect (unit, performance) test suites under *loc*.

    Walks the tree looking for ``test`` subdirectories containing
    ``*_test.py`` modules and loads them; returns a
    (test_suites, perf_suites) tuple of suite lists.
    """
    # in a developer's checkout, it is worth filtering-out .git
    # and we only need to look inside test subdirs
    #   (this might change)
    # this is not so performance critical that we can't afford
    # a couple of function calls to make it readable
    # TODO: handle parts of a module (see unittest.py)
    ldr= unittest.defaultTestLoader

    test_dirname = "test"
    test_suffix = "_test.py"

    def test_mod(p,ds):
        """ test for path p=test dir; removes a dir '.git' in ds """
        if ".git" in ds:
            ds.remove(".git")
        return os.path.basename(p) == test_dirname

    def match_mod(fs):
        """ test for any test modules; deletes all non-tests """
        # NB: do not delete fs elements within a "for f in fs"
        dels= [f for f in fs if not f.endswith(test_suffix)]
        for f in dels:
            fs.remove(f)
        return len(fs) > 0

    test_suites = []
    perf_suites = []
    # note that test_mod and match_mod modify passed-in lists
    paths = [(path,files) for path,dirs,files in os.walk(loc)
                if test_mod(path,dirs) and match_mod(files)]

    ## NO -- see explanation below
    ## oldpath = list(sys.path)
    for (dir,test_modules) in paths:
        sys.path.append(dir)
        for module in test_modules:
            if not module.endswith(test_suffix):
                raise ValueError
            mod = __import__(module[:-len(".py")])
            # A module may supply its own suite() factory; otherwise load
            # all tests from it with the default loader.
            if getattr(mod,"suite",None):
                test_suites.append(mod.suite())
            else:
                test_suites.append(ldr.loadTestsFromModule(mod))
            # Performance suites are optional per-module.
            try:
                perf_suites.append(mod.perfSuite())
            except:
                pass
    # NO: was: remove temporary paths added
    # this seems like it should be reasonable,
    # but it causes failure in _GrampsDbWRFactories_test.py
    # (I suspect it is an actual bug in the runner
    #  but the easiest fix is to keep the imports,
    #  which is what other loaders seem to do)
    ## NO -- do NOT:
    ## remove temporary paths added
    ## sys.path = list(oldpath)
    return (test_suites,perf_suites)


if __name__ == '__main__':

    def logging_init():
        # Attach a console handler to the 'Gramps' logger; both are kept as
        # globals so logging_adjust can retune their levels later.
        global logger
        global console
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        console.setFormatter(logging.Formatter(
            '%(name)-12s: %(levelname)-8s %(message)s'))
        logger = logging.getLogger('Gramps')
        logger.addHandler(console)
        return console, logger

    def logging_adjust(verbose_level):
        # Map the -v count onto log levels; -v4+ additionally enables the
        # GRAMPS_SIGNAL environment flag.
        if verbose_level == 1:
            logger.setLevel(logging.INFO)
            console.setLevel(logging.INFO)
        elif verbose_level == 2:
            logger.setLevel(logging.DEBUG)
            console.setLevel(logging.DEBUG)
        elif verbose_level == 3:
            logger.setLevel(logging.NOTSET)
            console.setLevel(logging.NOTSET)
        elif verbose_level >= 4:
            logger.setLevel(logging.NOTSET)
            console.setLevel(logging.NOTSET)
            os.environ['GRAMPS_SIGNAL'] = "1"
        else:
            logger.setLevel(logging.ERROR)
            console.setLevel(logging.ERROR)

    console,logger = logging_init()
    options,args = make_parser().parse_args()
    logging_adjust(options.verbose_level)

    # TODO allow multiple subdirs, modules, or testnames
    #  (see unittest.py)
    # hmmm this is starting to look like a unittest.Testprog
    #  (maybe with a custom TestLoader)
    if args and os.path.isdir(args[0]):
        loc = args[0].rstrip(os.path.sep)
    else:
        loc = gramps_root

    utests, ptests = getTestSuites(loc)
    if options.performance:
        suite = unittest.TestSuite(ptests)
    else:
        suite = unittest.TestSuite(utests)

    unittest.TextTestRunner(verbosity=options.verbose_level).run(suite)

#===eof===
gpl-2.0
eleonrk/SickRage
lib/pgi/cffilib/gir/giobjectinfo.py
19
4274
# Copyright 2013 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.

from .._compat import xrange
from .._utils import string_decode
from ._ffi import ffi, lib
from .giregisteredtypeinfo import GIRegisteredTypeInfo
from .gibaseinfo import GIBaseInfo, GIInfoType
from .gifunctioninfo import GIFunctionInfo
from .gipropertyinfo import GIPropertyInfo
from .gifieldinfo import GIFieldInfo
from .giconstantinfo import GIConstantInfo
from .gisignalinfo import GISignalInfo, GIVFuncInfo
from .gistructinfo import GIStructInfo
from .giinterfaceinfo import GIInterfaceInfo


@GIBaseInfo._register(GIInfoType.OBJECT)
class GIObjectInfo(GIRegisteredTypeInfo):
    """cffi wrapper around a GObject-Introspection GIObjectInfo.

    Each ``n_*`` property reports a count and the matching ``get_*(n)``
    method wraps the n-th child info; the ``get_*s()`` generators simply
    iterate those pairs.
    """

    @property
    def abstract(self):
        return lib.g_object_info_get_abstract(self._ptr)

    @property
    def fundamental(self):
        return lib.g_object_info_get_fundamental(self._ptr)

    @property
    def n_methods(self):
        return lib.g_object_info_get_n_methods(self._ptr)

    def get_method(self, n):
        return GIFunctionInfo(lib.g_object_info_get_method(self._ptr, n))

    def get_methods(self):
        for i in xrange(self.n_methods):
            yield self.get_method(i)

    @property
    def n_fields(self):
        return lib.g_object_info_get_n_fields(self._ptr)

    def get_field(self, n):
        return GIFieldInfo(lib.g_object_info_get_field(self._ptr, n))

    def get_fields(self):
        for i in xrange(self.n_fields):
            yield self.get_field(i)

    @property
    def n_constants(self):
        return lib.g_object_info_get_n_constants(self._ptr)

    def get_constant(self, n):
        return GIConstantInfo(lib.g_object_info_get_constant(self._ptr, n))

    def get_constants(self):
        for i in xrange(self.n_constants):
            yield self.get_constant(i)

    @property
    def n_vfuncs(self):
        return lib.g_object_info_get_n_vfuncs(self._ptr)

    def get_vfunc(self, n):
        return GIVFuncInfo(lib.g_object_info_get_vfunc(self._ptr, n))

    def get_vfuncs(self):
        for i in xrange(self.n_vfuncs):
            yield self.get_vfunc(i)

    @property
    def n_signals(self):
        return lib.g_object_info_get_n_signals(self._ptr)

    def get_signal(self, n):
        return GISignalInfo(lib.g_object_info_get_signal(self._ptr, n))

    def get_signals(self):
        for i in xrange(self.n_signals):
            yield self.get_signal(i)

    @property
    def n_interfaces(self):
        return lib.g_object_info_get_n_interfaces(self._ptr)

    def get_interface(self, n):
        return GIInterfaceInfo(lib.g_object_info_get_interface(self._ptr, n))

    def get_interfaces(self):
        for i in xrange(self.n_interfaces):
            yield self.get_interface(i)

    @property
    def n_properties(self):
        return lib.g_object_info_get_n_properties(self._ptr)

    def get_property(self, n):
        return GIPropertyInfo(lib.g_object_info_get_property(self._ptr, n))

    def get_properties(self):
        for i in xrange(self.n_properties):
            yield self.get_property(i)

    # The following accessors return C strings; string_decode converts them
    # to Python text (and handles NULL).
    @property
    def type_name(self):
        res = lib.g_object_info_get_type_name(self._ptr)
        return string_decode(ffi, res)

    @property
    def type_init(self):
        res = lib.g_object_info_get_type_init(self._ptr)
        return string_decode(ffi, res)

    @property
    def ref_function(self):
        res = lib.g_object_info_get_ref_function(self._ptr)
        return string_decode(ffi, res)

    @property
    def unref_function(self):
        res = lib.g_object_info_get_unref_function(self._ptr)
        return string_decode(ffi, res)

    @property
    def set_value_function(self):
        res = lib.g_object_info_get_set_value_function(self._ptr)
        return string_decode(ffi, res)

    @property
    def get_value_function(self):
        res = lib.g_object_info_get_get_value_function(self._ptr)
        return string_decode(ffi, res)

    def get_class_struct(self):
        # Returns None implicitly when the object has no class struct
        # (NULL pointer from the C side).
        res = lib.g_object_info_get_class_struct(self._ptr)
        if res:
            return GIStructInfo(res)
gpl-3.0
jikamens/send-later
utils/check-accesskeys.py
1
2313
#!/usr/bin/env python3 import codecs from collections import OrderedDict import glob import os import re import sys import unicodedata symbolic_languages = ('ja', 'ja-JP', 'zh-CN', 'zh-TW') def remove_accents(input_str): nfkd_form = unicodedata.normalize('NFKD', input_str) return u"".join([c for c in nfkd_form if not unicodedata.combining(c)]) def check_file(file_name): locale = os.path.basename(os.path.dirname(file_name)) ok = True labels = {} access_keys = OrderedDict() seen_keys = {} with codecs.open(file_name, encoding='utf-8') as f: for line in f: match = re.match(r'<!ENTITY\s+(.*)\.label\s+"(.*)">', line) if match: labels[match.group(1)] = match.group(2) continue match = re.match(r'<!ENTITY\s+(.*)\.accesskey\s+"(.*)">', line) if match: access_keys[match.group(1)] = match.group(2) continue for name, access_key in access_keys.items(): unaccented_key = remove_accents(access_key) lower_key = unaccented_key.lower() if lower_key not in seen_keys: seen_keys[lower_key] = name try: label = labels[name] except KeyError: ok = False print(u"{0}: No {1}.label corresponding to {1}.accesskey".format( file_name, name)) continue unaccented_label = remove_accents(label) if len(access_key) != 1: ok = False print(u"{0}: {1}.accesskey \"{2}\" isn't one character".format( file_name, name, access_key)) continue if access_key not in label and unaccented_key not in unaccented_label \ and locale not in symbolic_languages: ok = False print(u"{0}: {1}: access key \"{2}\" not in label \"{3}\"".format( file_name, name, access_key, label)) if seen_keys[lower_key] != name: ok = False print(u"{0}: access key \"{1}\" for {2} conflicts with {3}". format(file_name, access_key, name, seen_keys[lower_key])) return ok ok = True for file_name in sorted(glob.glob("chrome/locale/*/*.dtd")): ok = check_file(file_name) and ok sys.exit(0 if ok else 1)
mpl-2.0
Curious72/sympy
sympy/simplify/powsimp.py
23
26166
from __future__ import print_function, division from collections import defaultdict from sympy.core.function import expand_log, count_ops from sympy.core import sympify, Basic, Dummy, S, Add, Mul, Pow, expand_mul, factor_terms from sympy.core.compatibility import ordered, default_sort_key, reduce from sympy.core.numbers import Integer, Rational from sympy.core.mul import prod, _keep_coeff from sympy.core.rules import Transform from sympy.functions import exp_polar, exp, log, root, polarify, unpolarify from sympy.polys import lcm, gcd from sympy.ntheory.factor_ import multiplicity def powsimp(expr, deep=False, combine='all', force=False, measure=count_ops): """ reduces expression by combining powers with similar bases and exponents. Notes ===== If deep is True then powsimp() will also simplify arguments of functions. By default deep is set to False. If force is True then bases will be combined without checking for assumptions, e.g. sqrt(x)*sqrt(y) -> sqrt(x*y) which is not true if x and y are both negative. You can make powsimp() only combine bases or only combine exponents by changing combine='base' or combine='exp'. By default, combine='all', which does both. combine='base' will only combine:: a a a 2x x x * y => (x*y) as well as things like 2 => 4 and combine='exp' will only combine :: a b (a + b) x * x => x combine='exp' will strictly only combine exponents in the way that used to be automatic. Also use deep=True if you need the old behavior. When combine='all', 'exp' is evaluated first. Consider the first example below for when there could be an ambiguity relating to this. This is done so things like the second example can be completely combined. If you want 'base' combined first, do something like powsimp(powsimp(expr, combine='base'), combine='exp'). 
Examples ======== >>> from sympy import powsimp, exp, log, symbols >>> from sympy.abc import x, y, z, n >>> powsimp(x**y*x**z*y**z, combine='all') x**(y + z)*y**z >>> powsimp(x**y*x**z*y**z, combine='exp') x**(y + z)*y**z >>> powsimp(x**y*x**z*y**z, combine='base', force=True) x**y*(x*y)**z >>> powsimp(x**z*x**y*n**z*n**y, combine='all', force=True) (n*x)**(y + z) >>> powsimp(x**z*x**y*n**z*n**y, combine='exp') n**(y + z)*x**(y + z) >>> powsimp(x**z*x**y*n**z*n**y, combine='base', force=True) (n*x)**y*(n*x)**z >>> x, y = symbols('x y', positive=True) >>> powsimp(log(exp(x)*exp(y))) log(exp(x)*exp(y)) >>> powsimp(log(exp(x)*exp(y)), deep=True) x + y Radicals with Mul bases will be combined if combine='exp' >>> from sympy import sqrt, Mul >>> x, y = symbols('x y') Two radicals are automatically joined through Mul: >>> a=sqrt(x*sqrt(y)) >>> a*a**3 == a**4 True But if an integer power of that radical has been autoexpanded then Mul does not join the resulting factors: >>> a**4 # auto expands to a Mul, no longer a Pow x**2*y >>> _*a # so Mul doesn't combine them x**2*y*sqrt(x*sqrt(y)) >>> powsimp(_) # but powsimp will (x*sqrt(y))**(5/2) >>> powsimp(x*y*a) # but won't when doing so would violate assumptions x*y*sqrt(x*sqrt(y)) """ from sympy.matrices.expressions.matexpr import MatrixSymbol def recurse(arg, **kwargs): _deep = kwargs.get('deep', deep) _combine = kwargs.get('combine', combine) _force = kwargs.get('force', force) _measure = kwargs.get('measure', measure) return powsimp(arg, _deep, _combine, _force, _measure) expr = sympify(expr) if (not isinstance(expr, Basic) or isinstance(expr, MatrixSymbol) or ( expr.is_Atom or expr in (exp_polar(0), exp_polar(1)))): return expr if deep or expr.is_Add or expr.is_Mul and _y not in expr.args: expr = expr.func(*[recurse(w) for w in expr.args]) if expr.is_Pow: return recurse(expr*_y, deep=False)/_y if not expr.is_Mul: return expr # handle the Mul if combine in ('exp', 'all'): # Collect base/exp data, while maintaining order in 
the # non-commutative parts of the product c_powers = defaultdict(list) nc_part = [] newexpr = [] coeff = S.One for term in expr.args: if term.is_Rational: coeff *= term continue if term.is_Pow: term = _denest_pow(term) if term.is_commutative: b, e = term.as_base_exp() if deep: b, e = [recurse(i) for i in [b, e]] if b.is_Pow or b.func is exp: # don't let smthg like sqrt(x**a) split into x**a, 1/2 # or else it will be joined as x**(a/2) later b, e = b**e, S.One c_powers[b].append(e) else: # This is the logic that combines exponents for equal, # but non-commutative bases: A**x*A**y == A**(x+y). if nc_part: b1, e1 = nc_part[-1].as_base_exp() b2, e2 = term.as_base_exp() if (b1 == b2 and e1.is_commutative and e2.is_commutative): nc_part[-1] = Pow(b1, Add(e1, e2)) continue nc_part.append(term) # add up exponents of common bases for b, e in ordered(iter(c_powers.items())): # allow 2**x/4 -> 2**(x - 2); don't do this when b and e are # Numbers since autoevaluation will undo it, e.g. # 2**(1/3)/4 -> 2**(1/3 - 2) -> 2**(1/3)/4 if (b and b.is_Number and not all(ei.is_Number for ei in e) and \ coeff is not S.One and b not in (S.One, S.NegativeOne)): m = multiplicity(abs(b), abs(coeff)) if m: e.append(m) coeff /= b**m c_powers[b] = Add(*e) if coeff is not S.One: if coeff in c_powers: c_powers[coeff] += S.One else: c_powers[coeff] = S.One # convert to plain dictionary c_powers = dict(c_powers) # check for base and inverted base pairs be = list(c_powers.items()) skip = set() # skip if we already saw them for b, e in be: if b in skip: continue bpos = b.is_positive or b.is_polar if bpos: binv = 1/b if b != binv and binv in c_powers: if b.as_numer_denom()[0] is S.One: c_powers.pop(b) c_powers[binv] -= e else: skip.add(binv) e = c_powers.pop(binv) c_powers[b] -= e # check for base and negated base pairs be = list(c_powers.items()) _n = S.NegativeOne for i, (b, e) in enumerate(be): if ((-b).is_Symbol or b.is_Add) and -b in c_powers: if (b.is_positive in (0, 1) or e.is_integer): 
c_powers[-b] += c_powers.pop(b) if _n in c_powers: c_powers[_n] += e else: c_powers[_n] = e # filter c_powers and convert to a list c_powers = [(b, e) for b, e in c_powers.items() if e] # ============================================================== # check for Mul bases of Rational powers that can be combined with # separated bases, e.g. x*sqrt(x*y)*sqrt(x*sqrt(x*y)) -> # (x*sqrt(x*y))**(3/2) # ---------------- helper functions def ratq(x): '''Return Rational part of x's exponent as it appears in the bkey. ''' return bkey(x)[0][1] def bkey(b, e=None): '''Return (b**s, c.q), c.p where e -> c*s. If e is not given then it will be taken by using as_base_exp() on the input b. e.g. x**3/2 -> (x, 2), 3 x**y -> (x**y, 1), 1 x**(2*y/3) -> (x**y, 3), 2 exp(x/2) -> (exp(a), 2), 1 ''' if e is not None: # coming from c_powers or from below if e.is_Integer: return (b, S.One), e elif e.is_Rational: return (b, Integer(e.q)), Integer(e.p) else: c, m = e.as_coeff_Mul(rational=True) if c is not S.One: if m.is_integer: return (b, Integer(c.q)), m*Integer(c.p) return (b**m, Integer(c.q)), Integer(c.p) else: return (b**e, S.One), S.One else: return bkey(*b.as_base_exp()) def update(b): '''Decide what to do with base, b. If its exponent is now an integer multiple of the Rational denominator, then remove it and put the factors of its base in the common_b dictionary or update the existing bases if necessary. If it has been zeroed out, simply remove the base. 
''' newe, r = divmod(common_b[b], b[1]) if not r: common_b.pop(b) if newe: for m in Mul.make_args(b[0]**newe): b, e = bkey(m) if b not in common_b: common_b[b] = 0 common_b[b] += e if b[1] != 1: bases.append(b) # ---------------- end of helper functions # assemble a dictionary of the factors having a Rational power common_b = {} done = [] bases = [] for b, e in c_powers: b, e = bkey(b, e) if b in common_b.keys(): common_b[b] = common_b[b] + e else: common_b[b] = e if b[1] != 1 and b[0].is_Mul: bases.append(b) c_powers = [(b, e) for b, e in common_b.items() if e] bases.sort(key=default_sort_key) # this makes tie-breaking canonical bases.sort(key=measure, reverse=True) # handle longest first for base in bases: if base not in common_b: # it may have been removed already continue b, exponent = base last = False # True when no factor of base is a radical qlcm = 1 # the lcm of the radical denominators while True: bstart = b qstart = qlcm bb = [] # list of factors ee = [] # (factor's expo. and it's current value in common_b) for bi in Mul.make_args(b): bib, bie = bkey(bi) if bib not in common_b or common_b[bib] < bie: ee = bb = [] # failed break ee.append([bie, common_b[bib]]) bb.append(bib) if ee: # find the number of extractions possible # e.g. [(1, 2), (2, 2)] -> min(2/1, 2/2) -> 1 min1 = ee[0][1]/ee[0][0] for i in range(len(ee)): rat = ee[i][1]/ee[i][0] if rat < 1: break min1 = min(min1, rat) else: # update base factor counts # e.g. if ee = [(2, 5), (3, 6)] then min1 = 2 # and the new base counts will be 5-2*2 and 6-2*3 for i in range(len(bb)): common_b[bb[i]] -= min1*ee[i][0] update(bb[i]) # update the count of the base # e.g. 
x**2*y*sqrt(x*sqrt(y)) the count of x*sqrt(y) # will increase by 4 to give bkey (x*sqrt(y), 2, 5) common_b[base] += min1*qstart*exponent if (last # no more radicals in base or len(common_b) == 1 # nothing left to join with or all(k[1] == 1 for k in common_b) # no rad's in common_b ): break # see what we can exponentiate base by to remove any radicals # so we know what to search for # e.g. if base were x**(1/2)*y**(1/3) then we should # exponentiate by 6 and look for powers of x and y in the ratio # of 2 to 3 qlcm = lcm([ratq(bi) for bi in Mul.make_args(bstart)]) if qlcm == 1: break # we are done b = bstart**qlcm qlcm *= qstart if all(ratq(bi) == 1 for bi in Mul.make_args(b)): last = True # we are going to be done after this next pass # this base no longer can find anything to join with and # since it was longer than any other we are done with it b, q = base done.append((b, common_b.pop(base)*Rational(1, q))) # update c_powers and get ready to continue with powsimp c_powers = done # there may be terms still in common_b that were bases that were # identified as needing processing, so remove those, too for (b, q), e in common_b.items(): if (b.is_Pow or b.func is exp) and \ q is not S.One and not b.exp.is_Rational: b, be = b.as_base_exp() b = b**(be/q) else: b = root(b, q) c_powers.append((b, e)) check = len(c_powers) c_powers = dict(c_powers) assert len(c_powers) == check # there should have been no duplicates # ============================================================== # rebuild the expression newexpr = expr.func(*(newexpr + [Pow(b, e) for b, e in c_powers.items()])) if combine == 'exp': return expr.func(newexpr, expr.func(*nc_part)) else: return recurse(expr.func(*nc_part), combine='base') * \ recurse(newexpr, combine='base') elif combine == 'base': # Build c_powers and nc_part. These must both be lists not # dicts because exp's are not combined. 
c_powers = [] nc_part = [] for term in expr.args: if term.is_commutative: c_powers.append(list(term.as_base_exp())) else: # This is the logic that combines bases that are # different and non-commutative, but with equal and # commutative exponents: A**x*B**x == (A*B)**x. if nc_part: b1, e1 = nc_part[-1].as_base_exp() b2, e2 = term.as_base_exp() if (e1 == e2 and e2.is_commutative): nc_part[-1] = Pow(b1*b2, e1) continue nc_part.append(term) # Pull out numerical coefficients from exponent if assumptions allow # e.g., 2**(2*x) => 4**x for i in range(len(c_powers)): b, e = c_powers[i] if not (all(x.is_nonnegative for x in b.as_numer_denom()) or e.is_integer or force or b.is_polar): continue exp_c, exp_t = e.as_coeff_Mul(rational=True) if exp_c is not S.One and exp_t is not S.One: c_powers[i] = [Pow(b, exp_c), exp_t] # Combine bases whenever they have the same exponent and # assumptions allow # first gather the potential bases under the common exponent c_exp = defaultdict(list) for b, e in c_powers: if deep: e = recurse(e) c_exp[e].append(b) del c_powers # Merge back in the results of the above to form a new product c_powers = defaultdict(list) for e in c_exp: bases = c_exp[e] # calculate the new base for e if len(bases) == 1: new_base = bases[0] elif e.is_integer or force: new_base = expr.func(*bases) else: # see which ones can be joined unk = [] nonneg = [] neg = [] for bi in bases: if bi.is_negative: neg.append(bi) elif bi.is_nonnegative: nonneg.append(bi) elif bi.is_polar: nonneg.append( bi) # polar can be treated like non-negative else: unk.append(bi) if len(unk) == 1 and not neg or len(neg) == 1 and not unk: # a single neg or a single unk can join the rest nonneg.extend(unk + neg) unk = neg = [] elif neg: # their negative signs cancel in groups of 2*q if we know # that e = p/q else we have to treat them as unknown israt = False if e.is_Rational: israt = True else: p, d = e.as_numer_denom() if p.is_integer and d.is_integer: israt = True if israt: neg = [-w for w in 
neg] unk.extend([S.NegativeOne]*len(neg)) else: unk.extend(neg) neg = [] del israt # these shouldn't be joined for b in unk: c_powers[b].append(e) # here is a new joined base new_base = expr.func(*(nonneg + neg)) # if there are positive parts they will just get separated # again unless some change is made def _terms(e): # return the number of terms of this expression # when multiplied out -- assuming no joining of terms if e.is_Add: return sum([_terms(ai) for ai in e.args]) if e.is_Mul: return prod([_terms(mi) for mi in e.args]) return 1 xnew_base = expand_mul(new_base, deep=False) if len(Add.make_args(xnew_base)) < _terms(new_base): new_base = factor_terms(xnew_base) c_powers[new_base].append(e) # break out the powers from c_powers now c_part = [Pow(b, ei) for b, e in c_powers.items() for ei in e] # we're done return expr.func(*(c_part + nc_part)) else: raise ValueError("combine must be one of ('all', 'exp', 'base').") def powdenest(eq, force=False, polar=False): r""" Collect exponents on powers as assumptions allow. Given ``(bb**be)**e``, this can be simplified as follows: * if ``bb`` is positive, or * ``e`` is an integer, or * ``|be| < 1`` then this simplifies to ``bb**(be*e)`` Given a product of powers raised to a power, ``(bb1**be1 * bb2**be2...)**e``, simplification can be done as follows: - if e is positive, the gcd of all bei can be joined with e; - all non-negative bb can be separated from those that are negative and their gcd can be joined with e; autosimplification already handles this separation. - integer factors from powers that have integers in the denominator of the exponent can be removed from any term and the gcd of such integers can be joined with e Setting ``force`` to True will make symbols that are not explicitly negative behave as though they are positive, resulting in more denesting. Setting ``polar`` to True will do simplifications on the Riemann surface of the logarithm, also resulting in more denestings. 
When there are sums of logs in exp() then a product of powers may be obtained e.g. ``exp(3*(log(a) + 2*log(b)))`` - > ``a**3*b**6``. Examples ======== >>> from sympy.abc import a, b, x, y, z >>> from sympy import Symbol, exp, log, sqrt, symbols, powdenest >>> powdenest((x**(2*a/3))**(3*x)) (x**(2*a/3))**(3*x) >>> powdenest(exp(3*x*log(2))) 2**(3*x) Assumptions may prevent expansion: >>> powdenest(sqrt(x**2)) sqrt(x**2) >>> p = symbols('p', positive=True) >>> powdenest(sqrt(p**2)) p No other expansion is done. >>> i, j = symbols('i,j', integer=True) >>> powdenest((x**x)**(i + j)) # -X-> (x**x)**i*(x**x)**j x**(x*(i + j)) But exp() will be denested by moving all non-log terms outside of the function; this may result in the collapsing of the exp to a power with a different base: >>> powdenest(exp(3*y*log(x))) x**(3*y) >>> powdenest(exp(y*(log(a) + log(b)))) (a*b)**y >>> powdenest(exp(3*(log(a) + log(b)))) a**3*b**3 If assumptions allow, symbols can also be moved to the outermost exponent: >>> i = Symbol('i', integer=True) >>> powdenest(((x**(2*i))**(3*y))**x) ((x**(2*i))**(3*y))**x >>> powdenest(((x**(2*i))**(3*y))**x, force=True) x**(6*i*x*y) >>> powdenest(((x**(2*a/3))**(3*y/i))**x) ((x**(2*a/3))**(3*y/i))**x >>> powdenest((x**(2*i)*y**(4*i))**z, force=True) (x*y**2)**(2*i*z) >>> n = Symbol('n', negative=True) >>> powdenest((x**i)**y, force=True) x**(i*y) >>> powdenest((n**i)**x, force=True) (n**i)**x """ from sympy.simplify.simplify import posify if force: eq, rep = posify(eq) return powdenest(eq, force=False).xreplace(rep) if polar: eq, rep = polarify(eq) return unpolarify(powdenest(unpolarify(eq, exponents_only=True)), rep) new = powsimp(sympify(eq)) return new.xreplace(Transform( _denest_pow, filter=lambda m: m.is_Pow or m.func is exp)) _y = Dummy('y') def _denest_pow(eq): """ Denest powers. This is a helper function for powdenest that performs the actual transformation. 
""" from sympy.simplify.simplify import logcombine b, e = eq.as_base_exp() if b.is_Pow or isinstance(b.func, exp) and e != 1: new = b._eval_power(e) if new is not None: eq = new b, e = new.as_base_exp() # denest exp with log terms in exponent if b is S.Exp1 and e.is_Mul: logs = [] other = [] for ei in e.args: if any(ai.func is log for ai in Add.make_args(ei)): logs.append(ei) else: other.append(ei) logs = logcombine(Mul(*logs)) return Pow(exp(logs), Mul(*other)) _, be = b.as_base_exp() if be is S.One and not (b.is_Mul or b.is_Rational and b.q != 1 or b.is_positive): return eq # denest eq which is either pos**e or Pow**e or Mul**e or # Mul(b1**e1, b2**e2) # handle polar numbers specially polars, nonpolars = [], [] for bb in Mul.make_args(b): if bb.is_polar: polars.append(bb.as_base_exp()) else: nonpolars.append(bb) if len(polars) == 1 and not polars[0][0].is_Mul: return Pow(polars[0][0], polars[0][1]*e)*powdenest(Mul(*nonpolars)**e) elif polars: return Mul(*[powdenest(bb**(ee*e)) for (bb, ee) in polars]) \ *powdenest(Mul(*nonpolars)**e) if b.is_Integer: # use log to see if there is a power here logb = expand_log(log(b)) if logb.is_Mul: c, logb = logb.args e *= c base = logb.args[0] return Pow(base, e) # if b is not a Mul or any factor is an atom then there is nothing to do if not b.is_Mul or any(s.is_Atom for s in Mul.make_args(b)): return eq # let log handle the case of the base of the argument being a Mul, e.g. # sqrt(x**(2*i)*y**(6*i)) -> x**i*y**(3**i) if x and y are positive; we # will take the log, expand it, and then factor out the common powers that # now appear as coefficient. We do this manually since terms_gcd pulls out # fractions, terms_gcd(x+x*y/2) -> x*(y + 2)/2 and we don't want the 1/2; # gcd won't pull out numerators from a fraction: gcd(3*x, 9*x/2) -> x but # we want 3*x. Neither work with noncommutatives. 
def nc_gcd(aa, bb): a, b = [i.as_coeff_Mul() for i in [aa, bb]] c = gcd(a[0], b[0]).as_numer_denom()[0] g = Mul(*(a[1].args_cnc(cset=True)[0] & b[1].args_cnc(cset=True)[0])) return _keep_coeff(c, g) glogb = expand_log(log(b)) if glogb.is_Add: args = glogb.args g = reduce(nc_gcd, args) if g != 1: cg, rg = g.as_coeff_Mul() glogb = _keep_coeff(cg, rg*Add(*[a/g for a in args])) # now put the log back together again if glogb.func is log or not glogb.is_Mul: if glogb.args[0].is_Pow or glogb.args[0].func is exp: glogb = _denest_pow(glogb.args[0]) if (abs(glogb.exp) < 1) == True: return Pow(glogb.base, glogb.exp*e) return eq # the log(b) was a Mul so join any adds with logcombine add = [] other = [] for a in glogb.args: if a.is_Add: add.append(a) else: other.append(a) return Pow(exp(logcombine(Mul(*add))), e*Mul(*other))
bsd-3-clause
Qinusty/rethinkdb
test/interface/system_changefeeds.py
6
4972
#!/usr/bin/env python
# Copyright 2014-2015 RethinkDB, all rights reserved.

"""Check that changefeeds on system tables correctly notify when changes occur."""

import os, pprint, sys, time, threading, traceback

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse

r = utils.import_python_driver()

op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
_, command_prefix, server_options = scenario_common.parse_mode_flags(op.parse(sys.argv))

class AsyncChangefeed(threading.Thread):
    """Watches a changefeed query on a background thread and records changes.

    Any exception raised inside the feed is captured and re-reported on the
    main thread by check().
    """
    daemon = True
    conn = None      # driver connection owned by this feed
    query = None     # ReQL query being watched
    err = None       # sys.exc_info() tuple if the feed thread died
    changes = None   # change documents observed so far

    def __init__(self, server, query):
        super(AsyncChangefeed, self).__init__()
        self.conn = r.connect(server.host, server.driver_port)
        self.changes = []
        self.query = query
        self.start()
        # Give the feed a moment to be established before the caller starts
        # making changes that this feed is expected to observe.
        time.sleep(0.5)

    def run(self):
        try:
            for x in self.query.changes().run(self.conn):
                # Throw away initial values; real changes carry "old_val".
                if "old_val" in x:
                    self.changes.append(x)
        except Exception:
            # Stash the full traceback for the main thread to report.
            self.err = sys.exc_info()

    def check(self):
        """Abort the whole test if this feed's thread has failed."""
        if self.err is not None:
            utils.print_with_time("Exception from other thread:")
            traceback.print_exception(*self.err)
            sys.exit(1)

with driver.Cluster(output_folder='.', ) as cluster:
    proc1 = driver.Process(cluster=cluster, name='a', server_tags='a_tag', console_output=True, command_prefix=command_prefix, extra_options=server_options)
    proc2 = driver.Process(cluster=cluster, name='b', server_tags='b_tag', console_output=True, command_prefix=command_prefix, extra_options=server_options)
    # This is necessary because a few log messages may be printed even after `wait_until_ready()` returns.
    time.sleep(5.0)
    conn = r.connect(proc1.host, proc1.driver_port)

    # One feed per system table; feeds on user tables are added later.
    tables = ["cluster_config", "db_config", "current_issues", "logs", "server_config", "server_status", "table_config", "table_status"]
    feeds = { }
    for name in tables:
        feeds[name] = AsyncChangefeed(proc1, r.db('rethinkdb').table(name))

    def check(expected, timer):
        """Sleep `timer` seconds, then assert that exactly the feeds named in
        `expected` saw at least one change and every other feed saw none."""
        time.sleep(timer)
        for name, feed in feeds.items():
            feed.check()
            if name in expected:
                assert len(feed.changes) > 0, "Expected changes on %s, found none." % name
                feed.changes = []
            else:
                assert len(feed.changes) == 0, "Expected no changes on %s, found %s." % (name, feed.changes)

    # Baseline: nothing has been changed yet, so no feed should fire.
    check([], 5.0)

    utils.print_with_time("Changing auth key...")
    res = r.db("rethinkdb").table("cluster_config").get("auth").update({"auth_key": "foo"}).run(conn)
    assert res["replaced"] == 1 and res["errors"] == 0, res
    res = r.db("rethinkdb").table("cluster_config").get("auth").update({"auth_key": None}).run(conn)
    check(["cluster_config"], 1.0)

    utils.print_with_time("Creating database...")
    res = r.db_create("test").run(conn)
    assert res.get("dbs_created", 0) == 1, res
    check(["db_config"], 1.0)

    utils.print_with_time("Creating tables...")
    res = r.table_create("test", replicas={"a_tag": 1}, primary_replica_tag="a_tag").run(conn)
    assert res["tables_created"] == 1, res
    res = r.table_create("test2", replicas={"b_tag": 1}, primary_replica_tag="b_tag").run(conn)
    assert res["tables_created"] == 1, res
    check(["table_config", "table_status", "logs"], 1.5)

    utils.print_with_time("Creating feeds...")
    feeds["test_config"] = AsyncChangefeed(proc1, r.table('test').config())
    feeds["test_status"] = AsyncChangefeed(proc1, r.table('test').status())
    feeds["test2_config"] = AsyncChangefeed(proc1, r.table('test2').config())
    feeds["test2_status"] = AsyncChangefeed(proc1, r.table('test2').status())

    utils.print_with_time("Adding replicas...")
    res = r.table("test").config().update({"shards": [{"primary_replica": "a", "replicas": ["a", "b"]}]}).run(conn)
    assert res["errors"] == 0, res
    r.table("test").wait().run(conn)
    check(["table_config", "table_status", "test_config", "test_status", "logs"], 1.5)

    utils.print_with_time("Renaming server...")
    res = r.db("rethinkdb").table("server_config").filter({"name": "b"}).update({"name": "c"}).run(conn)
    assert res["replaced"] == 1 and res["errors"] == 0, res
    check(["logs", "server_config", "server_status", "table_config", "table_status", "test_config", "test_status", "test2_config", "test2_status"], 1.5)

    utils.print_with_time("Killing one server...")
    proc2.check_and_stop()
    check(["logs", "server_config", "server_status", "table_config", "table_status", "current_issues", "test_status", "test2_config", "test2_status"], 1.5)

    utils.print_with_time("Shutting everything down...")
utils.print_with_time("Done.")
agpl-3.0
arante/pyloc
microblog/flask/lib/python3.5/site-packages/pbr/util.py
22
26697
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Copyright (C) 2013 Association of Universities for Research in Astronomy # (AURA) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of AURA and its representatives may not be used to # endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED # WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. 
IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH # DAMAGE. """The code in this module is mostly copy/pasted out of the distutils2 source code, as recommended by Tarek Ziade. As such, it may be subject to some change as distutils2 development continues, and will have to be kept up to date. I didn't want to use it directly from distutils2 itself, since I do not want it to be an installation dependency for our packages yet--it is still too unstable (the latest version on PyPI doesn't even install). """ # These first two imports are not used, but are needed to get around an # irritating Python bug that can crop up when using ./setup.py test. 
# See: http://www.eby-sarna.com/pipermail/peak/2010-May/003355.html try: import multiprocessing # flake8: noqa except ImportError: pass import logging # flake8: noqa import os import re import sys import traceback from collections import defaultdict import distutils.ccompiler import pkg_resources from distutils import log from distutils import errors from setuptools.command.egg_info import manifest_maker from setuptools import dist as st_dist from setuptools import extension try: import ConfigParser as configparser except ImportError: import configparser from pbr import extra_files import pbr.hooks # A simplified RE for this; just checks that the line ends with version # predicates in () _VERSION_SPEC_RE = re.compile(r'\s*(.*?)\s*\((.*)\)\s*$') # Mappings from setup() keyword arguments to setup.cfg options; # The values are (section, option) tuples, or simply (section,) tuples if # the option has the same name as the setup() argument D1_D2_SETUP_ARGS = { "name": ("metadata",), "version": ("metadata",), "author": ("metadata",), "author_email": ("metadata",), "maintainer": ("metadata",), "maintainer_email": ("metadata",), "url": ("metadata", "home_page"), "description": ("metadata", "summary"), "keywords": ("metadata",), "long_description": ("metadata", "description"), "download_url": ("metadata",), "classifiers": ("metadata", "classifier"), "platforms": ("metadata", "platform"), # ** "license": ("metadata",), # Use setuptools install_requires, not # broken distutils requires "install_requires": ("metadata", "requires_dist"), "setup_requires": ("metadata", "setup_requires_dist"), "provides": ("metadata", "provides_dist"), # ** "obsoletes": ("metadata", "obsoletes_dist"), # ** "package_dir": ("files", 'packages_root'), "packages": ("files",), "package_data": ("files",), "namespace_packages": ("files",), "data_files": ("files",), "scripts": ("files",), "py_modules": ("files", "modules"), # ** "cmdclass": ("global", "commands"), # Not supported in distutils2, but 
provided for # backwards compatibility with setuptools "use_2to3": ("backwards_compat", "use_2to3"), "zip_safe": ("backwards_compat", "zip_safe"), "tests_require": ("backwards_compat", "tests_require"), "dependency_links": ("backwards_compat",), "include_package_data": ("backwards_compat",), } # setup() arguments that can have multiple values in setup.cfg MULTI_FIELDS = ("classifiers", "platforms", "install_requires", "provides", "obsoletes", "namespace_packages", "packages", "package_data", "data_files", "scripts", "py_modules", "dependency_links", "setup_requires", "tests_require", "cmdclass") # setup() arguments that contain boolean values BOOL_FIELDS = ("use_2to3", "zip_safe", "include_package_data") CSV_FIELDS = ("keywords",) def resolve_name(name): """Resolve a name like ``module.object`` to an object and return it. Raise ImportError if the module or name is not found. """ parts = name.split('.') cursor = len(parts) - 1 module_name = parts[:cursor] attr_name = parts[-1] while cursor > 0: try: ret = __import__('.'.join(module_name), fromlist=[attr_name]) break except ImportError: if cursor == 0: raise cursor -= 1 module_name = parts[:cursor] attr_name = parts[cursor] ret = '' for part in parts[cursor:]: try: ret = getattr(ret, part) except AttributeError: raise ImportError(name) return ret def cfg_to_args(path='setup.cfg', script_args=()): """Distutils2 to distutils1 compatibility util. This method uses an existing setup.cfg to generate a dictionary of keywords that can be used by distutils.core.setup(kwargs**). :param file: The setup.cfg path. :parm script_args: List of commands setup.py was called with. :raises DistutilsFileError: When the setup.cfg file is not found. """ # The method source code really starts here. 
if sys.version_info >= (3, 2): parser = configparser.ConfigParser() else: parser = configparser.SafeConfigParser() if not os.path.exists(path): raise errors.DistutilsFileError("file '%s' does not exist" % os.path.abspath(path)) parser.read(path) config = {} for section in parser.sections(): config[section] = dict() for k, value in parser.items(section): config[section][k.replace('-', '_')] = value # Run setup_hooks, if configured setup_hooks = has_get_option(config, 'global', 'setup_hooks') package_dir = has_get_option(config, 'files', 'packages_root') # Add the source package directory to sys.path in case it contains # additional hooks, and to make sure it's on the path before any existing # installations of the package if package_dir: package_dir = os.path.abspath(package_dir) sys.path.insert(0, package_dir) try: if setup_hooks: setup_hooks = [ hook for hook in split_multiline(setup_hooks) if hook != 'pbr.hooks.setup_hook'] for hook in setup_hooks: hook_fn = resolve_name(hook) try : hook_fn(config) except SystemExit: log.error('setup hook %s terminated the installation') except: e = sys.exc_info()[1] log.error('setup hook %s raised exception: %s\n' % (hook, e)) log.error(traceback.format_exc()) sys.exit(1) # Run the pbr hook pbr.hooks.setup_hook(config) kwargs = setup_cfg_to_setup_kwargs(config, script_args) # Set default config overrides kwargs['include_package_data'] = True kwargs['zip_safe'] = False register_custom_compilers(config) ext_modules = get_extension_modules(config) if ext_modules: kwargs['ext_modules'] = ext_modules entry_points = get_entry_points(config) if entry_points: kwargs['entry_points'] = entry_points wrap_commands(kwargs) # Handle the [files]/extra_files option files_extra_files = has_get_option(config, 'files', 'extra_files') if files_extra_files: extra_files.set_extra_files(split_multiline(files_extra_files)) finally: # Perform cleanup if any paths were added to sys.path if package_dir: sys.path.pop(0) return kwargs def 
setup_cfg_to_setup_kwargs(config, script_args=()): """Processes the setup.cfg options and converts them to arguments accepted by setuptools' setup() function. """ kwargs = {} # Temporarily holds install_requires and extra_requires while we # parse env_markers. all_requirements = {} for arg in D1_D2_SETUP_ARGS: if len(D1_D2_SETUP_ARGS[arg]) == 2: # The distutils field name is different than distutils2's. section, option = D1_D2_SETUP_ARGS[arg] elif len(D1_D2_SETUP_ARGS[arg]) == 1: # The distutils field name is the same thant distutils2's. section = D1_D2_SETUP_ARGS[arg][0] option = arg in_cfg_value = has_get_option(config, section, option) if not in_cfg_value: # There is no such option in the setup.cfg if arg == "long_description": in_cfg_value = has_get_option(config, section, "description_file") if in_cfg_value: in_cfg_value = split_multiline(in_cfg_value) value = '' for filename in in_cfg_value: description_file = open(filename) try: value += description_file.read().strip() + '\n\n' finally: description_file.close() in_cfg_value = value else: continue if arg in CSV_FIELDS: in_cfg_value = split_csv(in_cfg_value) if arg in MULTI_FIELDS: in_cfg_value = split_multiline(in_cfg_value) elif arg in BOOL_FIELDS: # Provide some flexibility here... 
if in_cfg_value.lower() in ('true', 't', '1', 'yes', 'y'): in_cfg_value = True else: in_cfg_value = False if in_cfg_value: if arg in ('install_requires', 'tests_require'): # Replaces PEP345-style version specs with the sort expected by # setuptools in_cfg_value = [_VERSION_SPEC_RE.sub(r'\1\2', pred) for pred in in_cfg_value] if arg == 'install_requires': # Split install_requires into package,env_marker tuples # These will be re-assembled later install_requires = [] requirement_pattern = '(?P<package>[^;]*);?(?P<env_marker>[^#]*?)(?:\s*#.*)?$' for requirement in in_cfg_value: m = re.match(requirement_pattern, requirement) requirement_package = m.group('package').strip() env_marker = m.group('env_marker').strip() install_requires.append((requirement_package,env_marker)) all_requirements[''] = install_requires elif arg == 'package_dir': in_cfg_value = {'': in_cfg_value} elif arg in ('package_data', 'data_files'): data_files = {} firstline = True prev = None for line in in_cfg_value: if '=' in line: key, value = line.split('=', 1) key, value = (key.strip(), value.strip()) if key in data_files: # Multiple duplicates of the same package name; # this is for backwards compatibility of the old # format prior to d2to1 0.2.6. 
prev = data_files[key] prev.extend(value.split()) else: prev = data_files[key.strip()] = value.split() elif firstline: raise errors.DistutilsOptionError( 'malformed package_data first line %r (misses ' '"=")' % line) else: prev.extend(line.strip().split()) firstline = False if arg == 'data_files': # the data_files value is a pointlessly different structure # from the package_data value data_files = data_files.items() in_cfg_value = data_files elif arg == 'cmdclass': cmdclass = {} dist = st_dist.Distribution() for cls_name in in_cfg_value: cls = resolve_name(cls_name) cmd = cls(dist) cmdclass[cmd.get_command_name()] = cls in_cfg_value = cmdclass kwargs[arg] = in_cfg_value # Transform requirements with embedded environment markers to # setuptools' supported marker-per-requirement format. # # install_requires are treated as a special case of extras, before # being put back in the expected place # # fred = # foo:marker # bar # -> {'fred': ['bar'], 'fred:marker':['foo']} if 'extras' in config: requirement_pattern = '(?P<package>[^:]*):?(?P<env_marker>[^#]*?)(?:\s*#.*)?$' extras = config['extras'] for extra in extras: extra_requirements = [] requirements = split_multiline(extras[extra]) for requirement in requirements: m = re.match(requirement_pattern, requirement) extras_value = m.group('package').strip() env_marker = m.group('env_marker') extra_requirements.append((extras_value,env_marker)) all_requirements[extra] = extra_requirements # Transform the full list of requirements into: # - install_requires, for those that have no extra and no # env_marker # - named extras, for those with an extra name (which may include # an env_marker) # - and as a special case, install_requires with an env_marker are # treated as named extras where the name is the empty string extras_require = {} for req_group in all_requirements: for requirement, env_marker in all_requirements[req_group]: if env_marker: extras_key = '%s:(%s)' % (req_group, env_marker) # We do not want to poison wheel 
creation with locally # evaluated markers. sdists always re-create the egg_info # and as such do not need guarded, and pip will never call # multiple setup.py commands at once. if 'bdist_wheel' not in script_args: try: if pkg_resources.evaluate_marker('(%s)' % env_marker): extras_key = req_group except SyntaxError: log.error( "Marker evaluation failed, see the following " "error. For more information see: " "http://docs.openstack.org/" "developer/pbr/compatibility.html#evaluate-marker" ) raise else: extras_key = req_group extras_require.setdefault(extras_key, []).append(requirement) kwargs['install_requires'] = extras_require.pop('', []) kwargs['extras_require'] = extras_require return kwargs def register_custom_compilers(config): """Handle custom compilers; this has no real equivalent in distutils, where additional compilers could only be added programmatically, so we have to hack it in somehow. """ compilers = has_get_option(config, 'global', 'compilers') if compilers: compilers = split_multiline(compilers) for compiler in compilers: compiler = resolve_name(compiler) # In distutils2 compilers these class attributes exist; for # distutils1 we just have to make something up if hasattr(compiler, 'name'): name = compiler.name else: name = compiler.__name__ if hasattr(compiler, 'description'): desc = compiler.description else: desc = 'custom compiler %s' % name module_name = compiler.__module__ # Note; this *will* override built in compilers with the same name # TODO: Maybe display a warning about this? cc = distutils.ccompiler.compiler_class cc[name] = (module_name, compiler.__name__, desc) # HACK!!!! Distutils assumes all compiler modules are in the # distutils package sys.modules['distutils.' 
+ module_name] = sys.modules[module_name] def get_extension_modules(config): """Handle extension modules""" EXTENSION_FIELDS = ("sources", "include_dirs", "define_macros", "undef_macros", "library_dirs", "libraries", "runtime_library_dirs", "extra_objects", "extra_compile_args", "extra_link_args", "export_symbols", "swig_opts", "depends") ext_modules = [] for section in config: if ':' in section: labels = section.split(':', 1) else: # Backwards compatibility for old syntax; don't use this though labels = section.split('=', 1) labels = [l.strip() for l in labels] if (len(labels) == 2) and (labels[0] == 'extension'): ext_args = {} for field in EXTENSION_FIELDS: value = has_get_option(config, section, field) # All extension module options besides name can have multiple # values if not value: continue value = split_multiline(value) if field == 'define_macros': macros = [] for macro in value: macro = macro.split('=', 1) if len(macro) == 1: macro = (macro[0].strip(), None) else: macro = (macro[0].strip(), macro[1].strip()) macros.append(macro) value = macros ext_args[field] = value if ext_args: if 'name' not in ext_args: ext_args['name'] = labels[1] ext_modules.append(extension.Extension(ext_args.pop('name'), **ext_args)) return ext_modules def get_entry_points(config): """Process the [entry_points] section of setup.cfg to handle setuptools entry points. This is, of course, not a standard feature of distutils2/packaging, but as there is not currently a standard alternative in packaging, we provide support for them. 
""" if not 'entry_points' in config: return {} return dict((option, split_multiline(value)) for option, value in config['entry_points'].items()) def wrap_commands(kwargs): dist = st_dist.Distribution() # This should suffice to get the same config values and command classes # that the actual Distribution will see (not counting cmdclass, which is # handled below) dist.parse_config_files() # Setuptools doesn't patch get_command_list, and as such we do not get # extra commands from entry_points. As we need to be compatable we deal # with this here. for ep in pkg_resources.iter_entry_points('distutils.commands'): if ep.name not in dist.cmdclass: if hasattr(ep, 'resolve'): cmdclass = ep.resolve() else: # Old setuptools does not have ep.resolve, and load with # arguments is depricated in 11+. Use resolve, 12+, if we # can, otherwise fall back to load. # Setuptools 11 will throw a deprication warning, as it # uses _load instead of resolve. cmdclass = ep.load(False) dist.cmdclass[ep.name] = cmdclass for cmd, _ in dist.get_command_list(): hooks = {} for opt, val in dist.get_option_dict(cmd).items(): val = val[1] if opt.startswith('pre_hook.') or opt.startswith('post_hook.'): hook_type, alias = opt.split('.', 1) hook_dict = hooks.setdefault(hook_type, {}) hook_dict[alias] = val if not hooks: continue if 'cmdclass' in kwargs and cmd in kwargs['cmdclass']: cmdclass = kwargs['cmdclass'][cmd] else: cmdclass = dist.get_command_class(cmd) new_cmdclass = wrap_command(cmd, cmdclass, hooks) kwargs.setdefault('cmdclass', {})[cmd] = new_cmdclass def wrap_command(cmd, cmdclass, hooks): def run(self, cmdclass=cmdclass): self.run_command_hooks('pre_hook') cmdclass.run(self) self.run_command_hooks('post_hook') return type(cmd, (cmdclass, object), {'run': run, 'run_command_hooks': run_command_hooks, 'pre_hook': hooks.get('pre_hook'), 'post_hook': hooks.get('post_hook')}) def run_command_hooks(cmd_obj, hook_kind): """Run hooks registered for that command and phase. 
*cmd_obj* is a finalized command object; *hook_kind* is either 'pre_hook' or 'post_hook'. """ if hook_kind not in ('pre_hook', 'post_hook'): raise ValueError('invalid hook kind: %r' % hook_kind) hooks = getattr(cmd_obj, hook_kind, None) if hooks is None: return for hook in hooks.values(): if isinstance(hook, str): try: hook_obj = resolve_name(hook) except ImportError: err = sys.exc_info()[1] # For py3k raise errors.DistutilsModuleError('cannot find hook %s: %s' % (hook,err)) else: hook_obj = hook if not hasattr(hook_obj, '__call__'): raise errors.DistutilsOptionError('hook %r is not callable' % hook) log.info('running %s %s for command %s', hook_kind, hook, cmd_obj.get_command_name()) try : hook_obj(cmd_obj) except: e = sys.exc_info()[1] log.error('hook %s raised exception: %s\n' % (hook, e)) log.error(traceback.format_exc()) sys.exit(1) def has_get_option(config, section, option): if section in config and option in config[section]: return config[section][option] else: return False def split_multiline(value): """Special behaviour when we have a multi line options""" value = [element for element in (line.strip() for line in value.split('\n')) if element] return value def split_csv(value): """Special behaviour when we have a comma separated options""" value = [element for element in (chunk.strip() for chunk in value.split(',')) if element] return value def monkeypatch_method(cls): """A function decorator to monkey-patch a method of the same name on the given class. """ def wrapper(func): orig = getattr(cls, func.__name__, None) if orig and not hasattr(orig, '_orig'): # Already patched setattr(func, '_orig', orig) setattr(cls, func.__name__, func) return func return wrapper # The following classes are used to hack Distribution.command_options a bit class DefaultGetDict(defaultdict): """Like defaultdict, but the get() method also sets and returns the default value. 
""" def get(self, key, default=None): if default is None: default = self.default_factory() return super(DefaultGetDict, self).setdefault(key, default) class IgnoreDict(dict): """A dictionary that ignores any insertions in which the key is a string matching any string in `ignore`. The ignore list can also contain wildcard patterns using '*'. """ def __init__(self, ignore): self.__ignore = re.compile(r'(%s)' % ('|'.join( [pat.replace('*', '.*') for pat in ignore]))) def __setitem__(self, key, val): if self.__ignore.match(key): return super(IgnoreDict, self).__setitem__(key, val)
gpl-3.0
jank3/django
django/template/loaders/filesystem.py
418
2158
""" Wrapper for loading templates from the filesystem. """ import errno import io import warnings from django.core.exceptions import SuspiciousFileOperation from django.template import Origin, TemplateDoesNotExist from django.utils._os import safe_join from django.utils.deprecation import RemovedInDjango20Warning from .base import Loader as BaseLoader class Loader(BaseLoader): def get_dirs(self): return self.engine.dirs def get_contents(self, origin): try: with io.open(origin.name, encoding=self.engine.file_charset) as fp: return fp.read() except IOError as e: if e.errno == errno.ENOENT: raise TemplateDoesNotExist(origin) raise def get_template_sources(self, template_name, template_dirs=None): """ Return an Origin object pointing to an absolute path in each directory in template_dirs. For security reasons, if a path doesn't lie inside one of the template_dirs it is excluded from the result set. """ if not template_dirs: template_dirs = self.get_dirs() for template_dir in template_dirs: try: name = safe_join(template_dir, template_name) except SuspiciousFileOperation: # The joined path was located outside of this template_dir # (it might be inside another one, so this isn't fatal). continue yield Origin( name=name, template_name=template_name, loader=self, ) def load_template_source(self, template_name, template_dirs=None): warnings.warn( 'The load_template_sources() method is deprecated. Use ' 'get_template() or get_contents() instead.', RemovedInDjango20Warning, ) for origin in self.get_template_sources(template_name, template_dirs): try: return self.get_contents(origin), origin.name except TemplateDoesNotExist: pass raise TemplateDoesNotExist(template_name)
bsd-3-clause
PlayUAV/MissionPlanner
Lib/email/mime/text.py
71
1036
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org

"""Class representing text/* type MIME documents."""

__all__ = ['MIMEText']

from email.encoders import encode_7or8bit
from email.mime.nonmultipart import MIMENonMultipart


class MIMEText(MIMENonMultipart):
    """A MIME document whose major content type is text/*."""

    def __init__(self, _text, _subtype='plain', _charset='us-ascii'):
        """Build a text/<_subtype> message holding *_text*.

        _text    -- the payload string for this message object.
        _subtype -- the MIME minor type (default: "plain").
        _charset -- value of the ``charset`` parameter added to the
                    Content-Type header (default: "us-ascii").  Setting the
                    payload below also fixes the Content-Transfer-Encoding
                    header as a side effect.
        """
        MIMENonMultipart.__init__(self, 'text', _subtype, charset=_charset)
        self.set_payload(_text, _charset)
gpl-3.0
DONIKAN/django
tests/auth_tests/test_tokens.py
297
2551
import unittest from datetime import date, timedelta from django.conf import settings from django.contrib.auth.models import User from django.contrib.auth.tokens import PasswordResetTokenGenerator from django.test import TestCase from django.utils.six import PY3 class TokenGeneratorTest(TestCase): def test_make_token(self): """ Ensure that we can make a token and that it is valid """ user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw') p0 = PasswordResetTokenGenerator() tk1 = p0.make_token(user) self.assertTrue(p0.check_token(user, tk1)) def test_10265(self): """ Ensure that the token generated for a user created in the same request will work correctly. """ # See ticket #10265 user = User.objects.create_user('comebackkid', 'test3@example.com', 'testpw') p0 = PasswordResetTokenGenerator() tk1 = p0.make_token(user) reload = User.objects.get(username='comebackkid') tk2 = p0.make_token(reload) self.assertEqual(tk1, tk2) def test_timeout(self): """ Ensure we can use the token after n days, but no greater. """ # Uses a mocked version of PasswordResetTokenGenerator so we can change # the value of 'today' class Mocked(PasswordResetTokenGenerator): def __init__(self, today): self._today_val = today def _today(self): return self._today_val user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw') p0 = PasswordResetTokenGenerator() tk1 = p0.make_token(user) p1 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS)) self.assertTrue(p1.check_token(user, tk1)) p2 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS + 1)) self.assertFalse(p2.check_token(user, tk1)) @unittest.skipIf(PY3, "Unnecessary test with Python 3") def test_date_length(self): """ Make sure we don't allow overly long dates, causing a potential DoS. 
""" user = User.objects.create_user('ima1337h4x0r', 'test4@example.com', 'p4ssw0rd') p0 = PasswordResetTokenGenerator() # This will put a 14-digit base36 timestamp into the token, which is too large. self.assertRaises(ValueError, p0._make_token_with_timestamp, user, 175455491841851871349)
bsd-3-clause
willdecker/suds
suds/properties.py
203
16223
# This program is free software; you can redistribute it and/or modify # it under the terms of the (LGPL) GNU Lesser General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library Lesser General Public License for more details at # ( http://www.gnu.org/licenses/lgpl.html ). # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # written by: Jeff Ortel ( jortel@redhat.com ) """ Properties classes. """ from logging import getLogger log = getLogger(__name__) class AutoLinker(object): """ Base class, provides interface for I{automatic} link management between a L{Properties} object and the L{Properties} contained within I{values}. """ def updated(self, properties, prev, next): """ Notification that a values was updated and the linkage between the I{properties} contained with I{prev} need to be relinked to the L{Properties} contained within the I{next} value. """ pass class Link(object): """ Property link object. @ivar endpoints: A tuple of the (2) endpoints of the link. @type endpoints: tuple(2) """ def __init__(self, a, b): """ @param a: Property (A) to link. @type a: L{Property} @param b: Property (B) to link. @type b: L{Property} """ pA = Endpoint(self, a) pB = Endpoint(self, b) self.endpoints = (pA, pB) self.validate(a, b) a.links.append(pB) b.links.append(pA) def validate(self, pA, pB): """ Validate that the two properties may be linked. @param pA: Endpoint (A) to link. @type pA: L{Endpoint} @param pB: Endpoint (B) to link. 
@type pB: L{Endpoint} @return: self @rtype: L{Link} """ if pA in pB.links or \ pB in pA.links: raise Exception, 'Already linked' dA = pA.domains() dB = pB.domains() for d in dA: if d in dB: raise Exception, 'Duplicate domain "%s" found' % d for d in dB: if d in dA: raise Exception, 'Duplicate domain "%s" found' % d kA = pA.keys() kB = pB.keys() for k in kA: if k in kB: raise Exception, 'Duplicate key %s found' % k for k in kB: if k in kA: raise Exception, 'Duplicate key %s found' % k return self def teardown(self): """ Teardown the link. Removes endpoints from properties I{links} collection. @return: self @rtype: L{Link} """ pA, pB = self.endpoints if pA in pB.links: pB.links.remove(pA) if pB in pA.links: pA.links.remove(pB) return self class Endpoint(object): """ Link endpoint (wrapper). @ivar link: The associated link. @type link: L{Link} @ivar target: The properties object. @type target: L{Property} """ def __init__(self, link, target): self.link = link self.target = target def teardown(self): return self.link.teardown() def __eq__(self, rhs): return ( self.target == rhs ) def __hash__(self): return hash(self.target) def __getattr__(self, name): return getattr(self.target, name) class Definition: """ Property definition. @ivar name: The property name. @type name: str @ivar classes: The (class) list of permitted values @type classes: tuple @ivar default: The default value. @ivar type: any """ def __init__(self, name, classes, default, linker=AutoLinker()): """ @param name: The property name. @type name: str @param classes: The (class) list of permitted values @type classes: tuple @param default: The default value. @type default: any """ if not isinstance(classes, (list, tuple)): classes = (classes,) self.name = name self.classes = classes self.default = default self.linker = linker def nvl(self, value=None): """ Convert the I{value} into the default when I{None}. @param value: The proposed value. 
@type value: any @return: The I{default} when I{value} is I{None}, else I{value}. @rtype: any """ if value is None: return self.default else: return value def validate(self, value): """ Validate the I{value} is of the correct class. @param value: The value to validate. @type value: any @raise AttributeError: When I{value} is invalid. """ if value is None: return if len(self.classes) and \ not isinstance(value, self.classes): msg = '"%s" must be: %s' % (self.name, self.classes) raise AttributeError,msg def __repr__(self): return '%s: %s' % (self.name, str(self)) def __str__(self): s = [] if len(self.classes): s.append('classes=%s' % str(self.classes)) else: s.append('classes=*') s.append("default=%s" % str(self.default)) return ', '.join(s) class Properties: """ Represents basic application properties. Provides basic type validation, default values and link/synchronization behavior. @ivar domain: The domain name. @type domain: str @ivar definitions: A table of property definitions. @type definitions: {name: L{Definition}} @ivar links: A list of linked property objects used to create a network of properties. @type links: [L{Property},..] @ivar defined: A dict of property values. @type defined: dict """ def __init__(self, domain, definitions, kwargs): """ @param domain: The property domain name. @type domain: str @param definitions: A table of property definitions. @type definitions: {name: L{Definition}} @param kwargs: A list of property name/values to set. @type kwargs: dict """ self.definitions = {} for d in definitions: self.definitions[d.name] = d self.domain = domain self.links = [] self.defined = {} self.modified = set() self.prime() self.update(kwargs) def definition(self, name): """ Get the definition for the property I{name}. @param name: The property I{name} to find the definition for. @type name: str @return: The property definition @rtype: L{Definition} @raise AttributeError: On not found. 
""" d = self.definitions.get(name) if d is None: raise AttributeError(name) return d def update(self, other): """ Update the property values as specified by keyword/value. @param other: An object to update from. @type other: (dict|L{Properties}) @return: self @rtype: L{Properties} """ if isinstance(other, Properties): other = other.defined for n,v in other.items(): self.set(n, v) return self def notset(self, name): """ Get whether a property has never been set by I{name}. @param name: A property name. @type name: str @return: True if never been set. @rtype: bool """ self.provider(name).__notset(name) def set(self, name, value): """ Set the I{value} of a property by I{name}. The value is validated against the definition and set to the default when I{value} is None. @param name: The property name. @type name: str @param value: The new property value. @type value: any @return: self @rtype: L{Properties} """ self.provider(name).__set(name, value) return self def unset(self, name): """ Unset a property by I{name}. @param name: A property name. @type name: str @return: self @rtype: L{Properties} """ self.provider(name).__set(name, None) return self def get(self, name, *df): """ Get the value of a property by I{name}. @param name: The property name. @type name: str @param df: An optional value to be returned when the value is not set @type df: [1]. @return: The stored value, or I{df[0]} if not set. @rtype: any """ return self.provider(name).__get(name, *df) def link(self, other): """ Link (associate) this object with anI{other} properties object to create a network of properties. Links are bidirectional. @param other: The object to link. @type other: L{Properties} @return: self @rtype: L{Properties} """ Link(self, other) return self def unlink(self, *others): """ Unlink (disassociate) the specified properties object. @param others: The list object to unlink. Unspecified means unlink all. @type others: [L{Properties},..] 
@return: self @rtype: L{Properties} """ if not len(others): others = self.links[:] for p in self.links[:]: if p in others: p.teardown() return self def provider(self, name, history=None): """ Find the provider of the property by I{name}. @param name: The property name. @type name: str @param history: A history of nodes checked to prevent circular hunting. @type history: [L{Properties},..] @return: The provider when found. Otherwise, None (when nested) and I{self} when not nested. @rtype: L{Properties} """ if history is None: history = [] history.append(self) if name in self.definitions: return self for x in self.links: if x in history: continue provider = x.provider(name, history) if provider is not None: return provider history.remove(self) if len(history): return None return self def keys(self, history=None): """ Get the set of I{all} property names. @param history: A history of nodes checked to prevent circular hunting. @type history: [L{Properties},..] @return: A set of property names. @rtype: list """ if history is None: history = [] history.append(self) keys = set() keys.update(self.definitions.keys()) for x in self.links: if x in history: continue keys.update(x.keys(history)) history.remove(self) return keys def domains(self, history=None): """ Get the set of I{all} domain names. @param history: A history of nodes checked to prevent circular hunting. @type history: [L{Properties},..] @return: A set of domain names. @rtype: list """ if history is None: history = [] history.append(self) domains = set() domains.add(self.domain) for x in self.links: if x in history: continue domains.update(x.domains(history)) history.remove(self) return domains def prime(self): """ Prime the stored values based on default values found in property definitions. 
@return: self @rtype: L{Properties} """ for d in self.definitions.values(): self.defined[d.name] = d.default return self def __notset(self, name): return not (name in self.modified) def __set(self, name, value): d = self.definition(name) d.validate(value) value = d.nvl(value) prev = self.defined[name] self.defined[name] = value self.modified.add(name) d.linker.updated(self, prev, value) def __get(self, name, *df): d = self.definition(name) value = self.defined.get(name) if value == d.default and len(df): value = df[0] return value def str(self, history): s = [] s.append('Definitions:') for d in self.definitions.values(): s.append('\t%s' % repr(d)) s.append('Content:') for d in self.defined.items(): s.append('\t%s' % str(d)) if self not in history: history.append(self) s.append('Linked:') for x in self.links: s.append(x.str(history)) history.remove(self) return '\n'.join(s) def __repr__(self): return str(self) def __str__(self): return self.str([]) class Skin(object): """ The meta-programming I{skin} around the L{Properties} object. @ivar __pts__: The wrapped object. @type __pts__: L{Properties}. """ def __init__(self, domain, definitions, kwargs): self.__pts__ = Properties(domain, definitions, kwargs) def __setattr__(self, name, value): builtin = name.startswith('__') and name.endswith('__') if builtin: self.__dict__[name] = value return self.__pts__.set(name, value) def __getattr__(self, name): return self.__pts__.get(name) def __repr__(self): return str(self) def __str__(self): return str(self.__pts__) class Unskin(object): def __new__(self, *args, **kwargs): return args[0].__pts__ class Inspector: """ Wrapper inspector. """ def __init__(self, options): self.properties = options.__pts__ def get(self, name, *df): """ Get the value of a property by I{name}. @param name: The property name. @type name: str @param df: An optional value to be returned when the value is not set @type df: [1]. @return: The stored value, or I{df[0]} if not set. 
@rtype: any """ return self.properties.get(name, *df) def update(self, **kwargs): """ Update the property values as specified by keyword/value. @param kwargs: A list of property name/values to set. @type kwargs: dict @return: self @rtype: L{Properties} """ return self.properties.update(**kwargs) def link(self, other): """ Link (associate) this object with anI{other} properties object to create a network of properties. Links are bidirectional. @param other: The object to link. @type other: L{Properties} @return: self @rtype: L{Properties} """ p = other.__pts__ return self.properties.link(p) def unlink(self, other): """ Unlink (disassociate) the specified properties object. @param other: The object to unlink. @type other: L{Properties} @return: self @rtype: L{Properties} """ p = other.__pts__ return self.properties.unlink(p)
mit
blaze/dask
dask/dataframe/io/tests/test_sql.py
1
15033
from contextlib import contextmanager import io import pytest # import dask from dask.dataframe.io.sql import read_sql_table from dask.dataframe.utils import assert_eq, PANDAS_GT_0240 from dask.utils import tmpfile pd = pytest.importorskip("pandas") dd = pytest.importorskip("dask.dataframe") pytest.importorskip("sqlalchemy") pytest.importorskip("sqlite3") np = pytest.importorskip("numpy") data = """ name,number,age,negish Alice,0,33,-5 Bob,1,40,-3 Chris,2,22,3 Dora,3,16,5 Edith,4,53,0 Francis,5,30,0 Garreth,6,20,0 """ df = pd.read_csv(io.StringIO(data), index_col="number") @pytest.yield_fixture def db(): with tmpfile() as f: uri = "sqlite:///%s" % f df.to_sql("test", uri, index=True, if_exists="replace") yield uri def test_empty(db): from sqlalchemy import create_engine, MetaData, Table, Column, Integer with tmpfile() as f: uri = "sqlite:///%s" % f metadata = MetaData() engine = create_engine(uri) table = Table( "empty_table", metadata, Column("id", Integer, primary_key=True), Column("col2", Integer), ) metadata.create_all(engine) dask_df = read_sql_table(table.name, uri, index_col="id", npartitions=1) assert dask_df.index.name == "id" assert dask_df.col2.dtype == np.dtype("int64") pd_dataframe = dask_df.compute() assert pd_dataframe.empty is True def test_passing_engine_as_uri_raises_helpful_error(db): # https://github.com/dask/dask/issues/6473 from sqlalchemy import create_engine df = pd.DataFrame([{"i": i, "s": str(i) * 2} for i in range(4)]) ddf = dd.from_pandas(df, npartitions=2) with tmpfile() as f: db = "sqlite:///%s" % f engine = create_engine(db) with pytest.raises(ValueError, match="Expected URI to be a string"): ddf.to_sql("test", engine, if_exists="replace") @pytest.mark.skip( reason="Requires a postgres server. Sqlite does not support multiple schemas." ) def test_empty_other_schema(): from sqlalchemy import create_engine, MetaData, Table, Column, Integer, event, DDL # Database configurations. 
pg_host = "localhost" pg_port = "5432" pg_user = "user" pg_pass = "pass" pg_db = "db" db_url = "postgresql://%s:%s@%s:%s/%s" % (pg_user, pg_pass, pg_host, pg_port, pg_db) # Create an empty table in a different schema. table_name = "empty_table" schema_name = "other_schema" engine = create_engine(db_url) metadata = MetaData() table = Table( table_name, metadata, Column("id", Integer, primary_key=True), Column("col2", Integer), schema=schema_name, ) # Create the schema and the table. event.listen( metadata, "before_create", DDL("CREATE SCHEMA IF NOT EXISTS %s" % schema_name) ) metadata.create_all(engine) # Read the empty table from the other schema. dask_df = read_sql_table( table.name, db_url, index_col="id", schema=table.schema, npartitions=1 ) # Validate that the retrieved table is empty. assert dask_df.index.name == "id" assert dask_df.col2.dtype == np.dtype("int64") pd_dataframe = dask_df.compute() assert pd_dataframe.empty is True # Drop the schema and the table. engine.execute("DROP SCHEMA IF EXISTS %s CASCADE" % schema_name) def test_needs_rational(db): import datetime now = datetime.datetime.now() d = datetime.timedelta(seconds=1) df = pd.DataFrame( { "a": list("ghjkl"), "b": [now + i * d for i in range(5)], "c": [True, True, False, True, True], } ) df = df.append( [ {"a": "x", "b": now + d * 1000, "c": None}, {"a": None, "b": now + d * 1001, "c": None}, ] ) with tmpfile() as f: uri = "sqlite:///%s" % f df.to_sql("test", uri, index=False, if_exists="replace") # one partition contains NULL data = read_sql_table("test", uri, npartitions=2, index_col="b") df2 = df.set_index("b") assert_eq(data, df2.astype({"c": bool})) # bools are coerced # one partition contains NULL, but big enough head data = read_sql_table("test", uri, npartitions=2, index_col="b", head_rows=12) df2 = df.set_index("b") assert_eq(data, df2) # empty partitions data = read_sql_table("test", uri, npartitions=20, index_col="b") part = data.get_partition(12).compute() assert part.dtypes.tolist() 
== ["O", bool] assert part.empty df2 = df.set_index("b") assert_eq(data, df2.astype({"c": bool})) # explicit meta data = read_sql_table("test", uri, npartitions=2, index_col="b", meta=df2[:0]) part = data.get_partition(1).compute() assert part.dtypes.tolist() == ["O", "O"] df2 = df.set_index("b") assert_eq(data, df2) def test_simple(db): # single chunk data = read_sql_table("test", db, npartitions=2, index_col="number").compute() assert (data.name == df.name).all() assert data.index.name == "number" assert_eq(data, df) def test_npartitions(db): data = read_sql_table( "test", db, columns=list(df.columns), npartitions=2, index_col="number" ) assert len(data.divisions) == 3 assert (data.name.compute() == df.name).all() data = read_sql_table( "test", db, columns=["name"], npartitions=6, index_col="number" ) assert_eq(data, df[["name"]]) data = read_sql_table( "test", db, columns=list(df.columns), bytes_per_chunk="2 GiB", index_col="number", ) assert data.npartitions == 1 assert (data.name.compute() == df.name).all() data_1 = read_sql_table( "test", db, columns=list(df.columns), bytes_per_chunk=2 ** 30, index_col="number", head_rows=1, ) assert data_1.npartitions == 1 assert (data_1.name.compute() == df.name).all() data = read_sql_table( "test", db, columns=list(df.columns), bytes_per_chunk=250, index_col="number", head_rows=1, ) assert data.npartitions == 2 def test_divisions(db): data = read_sql_table( "test", db, columns=["name"], divisions=[0, 2, 4], index_col="number" ) assert data.divisions == (0, 2, 4) assert data.index.max().compute() == 4 assert_eq(data, df[["name"]][df.index <= 4]) def test_division_or_partition(db): with pytest.raises(TypeError): read_sql_table( "test", db, columns=["name"], index_col="number", divisions=[0, 2, 4], npartitions=3, ) out = read_sql_table("test", db, index_col="number", bytes_per_chunk=100) m = out.map_partitions( lambda d: d.memory_usage(deep=True, index=True).sum() ).compute() assert (50 < m).all() and (m < 200).all() 
assert_eq(out, df) def test_meta(db): data = read_sql_table( "test", db, index_col="number", meta=dd.from_pandas(df, npartitions=1) ).compute() assert (data.name == df.name).all() assert data.index.name == "number" assert_eq(data, df) def test_meta_no_head_rows(db): data = read_sql_table( "test", db, index_col="number", meta=dd.from_pandas(df, npartitions=1), npartitions=2, head_rows=0, ) assert len(data.divisions) == 3 data = data.compute() assert (data.name == df.name).all() assert data.index.name == "number" assert_eq(data, df) data = read_sql_table( "test", db, index_col="number", meta=dd.from_pandas(df, npartitions=1), divisions=[0, 3, 6], head_rows=0, ) assert len(data.divisions) == 3 data = data.compute() assert (data.name == df.name).all() assert data.index.name == "number" assert_eq(data, df) def test_no_meta_no_head_rows(db): with pytest.raises(ValueError): read_sql_table("test", db, index_col="number", head_rows=0, npartitions=1) def test_range(db): data = read_sql_table("test", db, npartitions=2, index_col="number", limits=[1, 4]) assert data.index.min().compute() == 1 assert data.index.max().compute() == 4 def test_datetimes(): import datetime now = datetime.datetime.now() d = datetime.timedelta(seconds=1) df = pd.DataFrame( {"a": list("ghjkl"), "b": [now + i * d for i in range(2, -3, -1)]} ) with tmpfile() as f: uri = "sqlite:///%s" % f df.to_sql("test", uri, index=False, if_exists="replace") data = read_sql_table("test", uri, npartitions=2, index_col="b") assert data.index.dtype.kind == "M" assert data.divisions[0] == df.b.min() df2 = df.set_index("b") assert_eq(data.map_partitions(lambda x: x.sort_index()), df2.sort_index()) def test_with_func(db): from sqlalchemy import sql index = sql.func.abs(sql.column("negish")).label("abs") # function for the index, get all columns data = read_sql_table("test", db, npartitions=2, index_col=index) assert data.divisions[0] == 0 part = data.get_partition(0).compute() assert (part.index == 0).all() # now an arith 
op for one column too; it's name will be 'age' data = read_sql_table( "test", db, npartitions=2, index_col=index, columns=[index, -(sql.column("age"))], ) assert (data.age.compute() < 0).all() # a column that would have no name, give it a label index = (-(sql.column("negish"))).label("index") data = read_sql_table( "test", db, npartitions=2, index_col=index, columns=["negish", "age"] ) d = data.compute() assert (-d.index == d["negish"]).all() def test_no_nameless_index(db): from sqlalchemy import sql index = -(sql.column("negish")) with pytest.raises(ValueError): read_sql_table( "test", db, npartitions=2, index_col=index, columns=["negish", "age", index] ) index = sql.func.abs(sql.column("negish")) # function for the index, get all columns with pytest.raises(ValueError): read_sql_table("test", db, npartitions=2, index_col=index) def test_select_from_select(db): from sqlalchemy import sql s1 = sql.select([sql.column("number"), sql.column("name")]).select_from( sql.table("test") ) out = read_sql_table(s1, db, npartitions=2, index_col="number") assert_eq(out, df[["name"]]) def test_extra_connection_engine_keywords(capsys, db): data = read_sql_table( "test", db, npartitions=2, index_col="number", engine_kwargs={"echo": False} ).compute() # no captured message from the stdout with the echo=False parameter (this is the default) out, err = capsys.readouterr() assert "SELECT" not in out assert_eq(data, df) # with the echo=True sqlalchemy parameter, you should get all SQL queries in the stdout data = read_sql_table( "test", db, npartitions=2, index_col="number", engine_kwargs={"echo": True} ).compute() out, err = capsys.readouterr() assert "WHERE test.number >= ? AND test.number < ?" in out assert "WHERE test.number >= ? AND test.number <= ?" 
in out assert_eq(data, df) def test_no_character_index_without_divisions(db): # attempt to read the sql table with a character index and no divisions with pytest.raises(TypeError): read_sql_table("test", db, npartitions=2, index_col="name", divisions=None) @contextmanager def tmp_db_uri(): with tmpfile() as f: yield "sqlite:///%s" % f @pytest.mark.parametrize("npartitions", (1, 2)) @pytest.mark.parametrize("parallel", (False, True)) def test_to_sql(npartitions, parallel): df_by_age = df.set_index("age") df_appended = pd.concat( [ df, df, ] ) ddf = dd.from_pandas(df, npartitions) ddf_by_age = ddf.set_index("age") # Simple round trip test: use existing "number" index_col with tmp_db_uri() as uri: ddf.to_sql("test", uri, parallel=parallel) result = read_sql_table("test", uri, "number") assert_eq(df, result) # Test writing no index, and reading back in with one of the other columns as index (`read_sql_table` requires # an index_col) with tmp_db_uri() as uri: ddf.to_sql("test", uri, parallel=parallel, index=False) result = read_sql_table("test", uri, "negish") assert_eq(df.set_index("negish"), result) result = read_sql_table("test", uri, "age") assert_eq(df_by_age, result) # Index by "age" instead with tmp_db_uri() as uri: ddf_by_age.to_sql("test", uri, parallel=parallel) result = read_sql_table("test", uri, "age") assert_eq(df_by_age, result) # Index column can't have "object" dtype if no partitions are provided with tmp_db_uri() as uri: ddf.set_index("name").to_sql("test", uri) with pytest.raises( TypeError, match='Provided index column is of type "object". 
If divisions is not provided the index column type must be numeric or datetime.', # noqa: E501 ): read_sql_table("test", uri, "name") # Test various "if_exists" values with tmp_db_uri() as uri: ddf.to_sql("test", uri) # Writing a table that already exists fails with pytest.raises(ValueError, match="Table 'test' already exists"): ddf.to_sql("test", uri) ddf.to_sql("test", uri, parallel=parallel, if_exists="append") result = read_sql_table("test", uri, "number") assert_eq(df_appended, result) ddf_by_age.to_sql("test", uri, parallel=parallel, if_exists="replace") result = read_sql_table("test", uri, "age") assert_eq(df_by_age, result) # Verify number of partitions returned, when compute=False with tmp_db_uri() as uri: result = ddf.to_sql("test", uri, parallel=parallel, compute=False) # the first result is from the "meta" insert actual = len(result.compute()) assert actual == npartitions def test_to_sql_kwargs(): ddf = dd.from_pandas(df, 2) with tmp_db_uri() as uri: # "method" keyword is allowed iff pandas>=0.24.0 if PANDAS_GT_0240: ddf.to_sql("test", uri, method="multi") else: with pytest.raises( NotImplementedError, match=r"'method' requires pandas>=0.24.0. You have version 0.23.\d", ): ddf.to_sql("test", uri, method="multi") # Other, unknown keywords always disallowed with pytest.raises( TypeError, match="to_sql\\(\\) got an unexpected keyword argument 'unknown'" ): ddf.to_sql("test", uri, unknown=None)
bsd-3-clause
SM-G920P/Hacker_Kernel_SM-G92X
tools/perf/scripts/python/check-perf-trace.py
11214
2503
# perf script event handlers, generated by perf script -g python # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # This script tests basic functionality such as flag and symbol # strings, common_xxx() calls back into perf, begin, end, unhandled # events, etc. Basically, if this script runs successfully and # displays expected results, Python scripting support should be ok. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from Core import * from perf_trace_context import * unhandled = autodict() def trace_begin(): print "trace_begin" pass def trace_end(): print_unhandled() def irq__softirq_entry(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, vec): print_header(event_name, common_cpu, common_secs, common_nsecs, common_pid, common_comm) print_uncommon(context) print "vec=%s\n" % \ (symbol_str("irq__softirq_entry", "vec", vec)), def kmem__kmalloc(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, call_site, ptr, bytes_req, bytes_alloc, gfp_flags): print_header(event_name, common_cpu, common_secs, common_nsecs, common_pid, common_comm) print_uncommon(context) print "call_site=%u, ptr=%u, bytes_req=%u, " \ "bytes_alloc=%u, gfp_flags=%s\n" % \ (call_site, ptr, bytes_req, bytes_alloc, flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)), def trace_unhandled(event_name, context, event_fields_dict): try: unhandled[event_name] += 1 except TypeError: unhandled[event_name] = 1 def print_header(event_name, cpu, secs, nsecs, pid, comm): print "%-20s %5u %05u.%09u %8u %-20s " % \ (event_name, cpu, secs, nsecs, pid, comm), # print trace fields not included in handler args def print_uncommon(context): print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \ % (common_pc(context), trace_flag_str(common_flags(context)), \ common_lock_depth(context)) def 
print_unhandled(): keys = unhandled.keys() if not keys: return print "\nunhandled events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "-----------"), for event_name in keys: print "%-40s %10d\n" % (event_name, unhandled[event_name])
gpl-2.0
michath/ConMonkey
python/mozbuild/mozbuild/action/xpccheck.py
2
2462
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. '''A generic script to verify all test files are in the corresponding .ini file. Usage: xpccheck.py <directory> [<directory> ...] ''' import sys import os from glob import glob import manifestparser def getIniTests(testdir): mp = manifestparser.ManifestParser(strict=False) mp.read(os.path.join(testdir, 'xpcshell.ini')) return mp.tests def verifyDirectory(initests, directory): files = glob(os.path.join(os.path.abspath(directory), "test_*")) for f in files: if (not os.path.isfile(f)): continue name = os.path.basename(f) if name.endswith('.in'): name = name[:-3] if not name.endswith('.js'): continue found = False for test in initests: if os.path.join(os.path.abspath(directory), name) == test['path']: found = True break if not found: print >>sys.stderr, "TEST-UNEXPECTED-FAIL | xpccheck | test %s is missing from test manifest %s!" % (name, os.path.join(directory, 'xpcshell.ini')) sys.exit(1) def verifyIniFile(initests, directory): files = glob(os.path.join(os.path.abspath(directory), "test_*")) for test in initests: name = test['path'].split('/')[-1] found = False for f in files: fname = f.split('/')[-1] if fname.endswith('.in'): fname = '.in'.join(fname.split('.in')[:-1]) if os.path.join(os.path.abspath(directory), fname) == test['path']: found = True break if not found: print >>sys.stderr, "TEST-UNEXPECTED-FAIL | xpccheck | found %s in xpcshell.ini and not in directory '%s'" % (name, directory) sys.exit(1) def main(argv): if len(argv) < 2: print >>sys.stderr, "Usage: xpccheck.py <topsrcdir> <directory> [<directory> ...]" sys.exit(1) topsrcdir = argv[0] for d in argv[1:]: # xpcshell-unpack is a copy of xpcshell sibling directory and in the Makefile # we copy all files (including xpcshell.ini from the sibling directory. 
if d.endswith('toolkit/mozapps/extensions/test/xpcshell-unpack'): continue initests = getIniTests(d) verifyDirectory(initests, d) verifyIniFile(initests, d) if __name__ == '__main__': main(sys.argv[1:])
mpl-2.0
albertofwb/CloudMonitor
CloudMonitor/tests/PythonCodes/Client.py
1
2753
#! /usr/bin/python #-*- coding:utf-8 -*- ''' @author: 屈亮亮 @createTime: 2016-9-13 @function:文件上传 ''' import socket import struct import os import ssl import uuid import platform import hashlib def getMAC(): mac=uuid.UUID(int = uuid.getnode()).hex[-12:] if len(mac) == 12: mac = mac[0:2] + mac[2:4] + mac[4:6] +mac[6:8] +mac[8:10] +mac[10: ] return mac else: return '0' def getsysName(): sysName = platform.uname()[0] if len(sysName) > 0: return sysName else: return '0' def getMd5Hash(fileName): with open(fileName,'rb') as f: md5obj = hashlib.md5() md5obj.update(f.read()) filehash = md5obj.hexdigest() return filehash i = 100 while(i > 0): sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) ssl_sock = ssl.wrap_socket(sock, ca_certs="cert.pem", cert_reqs=ssl.CERT_REQUIRED) e=0 try: #ssl_sock.connect(('10.102.1.116',50005)) ssl_sock.connect(('127.0.0.1',50005)) except(socket.timeout,e): print('timeout',e) except(socket.error,e): print('error',e) except e: print('any',e) if not e: #filename = input("fileName>")#输入文件名 #filename = "nginxConfig.text" filename = "test.txt" localFileName = os.path.join("/tmp/Files", filename) filename = filename.encode('utf-8') fileHash = getMd5Hash(localFileName) if fileHash: fileHash = fileHash.encode('utf-8') userName = "".encode('utf-8') version = getsysName() if version != '0': HostVersion = version.encode('utf-8') else: print("ERROR: get HostVersion err.") mac = getMAC() if mac != '0': hostMAC = mac.encode('utf-8') else: print("ERROR: get MAC err.") passwd = "".encode('utf-8') headValues = (filename, fileHash, userName, HostVersion, hostMAC, passwd, os.stat(localFileName).st_size) filePack = struct.Struct('128s32s32s32s48s20sI')#编码格式大小 fhead = filePack.pack(*headValues)#按照规则进行打包 ssl_sock.send(fhead) readRes = ssl_sock.recv(4).decode("utf8") if readRes == "ok": fp = open(localFileName,'rb') while 1: #发送文件 filedata = fp.read(1024) if not filedata: break ssl_sock.send(filedata) print("sending over...") fp.close() ssl_sock.close()
gpl-3.0
itsjeyd/edx-platform
lms/djangoapps/instructor_task/subtasks.py
10
27556
""" This module contains celery task functions for handling the management of subtasks. """ from time import time import json from uuid import uuid4 import psutil from contextlib import contextmanager import logging from celery.states import SUCCESS, READY_STATES, RETRY import dogstats_wrapper as dog_stats_api from django.db import transaction, DatabaseError from django.core.cache import cache from lms.djangoapps.instructor_task.models import InstructorTask, PROGRESS, QUEUING from util.db import outer_atomic TASK_LOG = logging.getLogger('edx.celery.task') # Lock expiration should be long enough to allow a subtask to complete. SUBTASK_LOCK_EXPIRE = 60 * 10 # Lock expires in 10 minutes # Number of times to retry if a subtask update encounters a lock on the InstructorTask. # (These are recursive retries, so don't make this number too large.) MAX_DATABASE_LOCK_RETRIES = 5 class DuplicateTaskException(Exception): """Exception indicating that a task already exists or has already completed.""" pass def _get_number_of_subtasks(total_num_items, items_per_task): """ Determines number of subtasks that would be generated by _generate_items_for_subtask. This needs to be calculated before the query is executed so that the list of all subtasks can be stored in the InstructorTask before any subtasks are started. The number of subtask_id values returned by this should match the number of chunks returned by the generate_items_for_subtask generator. """ num_subtasks, remainder = divmod(total_num_items, items_per_task) if remainder: num_subtasks += 1 return num_subtasks @contextmanager def track_memory_usage(metric, course_id): """ Context manager to track how much memory (in bytes) a given process uses. Metrics will look like: 'course_email.subtask_generation.memory.rss' or 'course_email.subtask_generation.memory.vms'. 
""" memory_types = ['rss', 'vms'] process = psutil.Process() baseline_memory_info = process.get_memory_info() baseline_usages = [getattr(baseline_memory_info, memory_type) for memory_type in memory_types] yield for memory_type, baseline_usage in zip(memory_types, baseline_usages): total_memory_info = process.get_memory_info() total_usage = getattr(total_memory_info, memory_type) memory_used = total_usage - baseline_usage dog_stats_api.increment( metric + "." + memory_type, memory_used, tags=["course_id:{}".format(course_id)], ) def _generate_items_for_subtask( item_querysets, # pylint: disable=bad-continuation item_fields, total_num_items, items_per_task, total_num_subtasks, course_id, ): """ Generates a chunk of "items" that should be passed into a subtask. Arguments: `item_querysets` : a list of query sets, each of which defines the "items" that should be passed to subtasks. `item_fields` : the fields that should be included in the dict that is returned. These are in addition to the 'pk' field. `total_num_items` : the result of summing the count of each queryset in `item_querysets`. `items_per_query` : size of chunks to break the query operation into. `items_per_task` : maximum size of chunks to break each query chunk into for use by a subtask. `course_id` : course_id of the course. Only needed for the track_memory_usage context manager. Returns: yields a list of dicts, where each dict contains the fields in `item_fields`, plus the 'pk' field. Warning: if the algorithm here changes, the _get_number_of_subtasks() method should similarly be changed. 
""" num_items_queued = 0 all_item_fields = list(item_fields) all_item_fields.append('pk') num_subtasks = 0 items_for_task = [] with track_memory_usage('course_email.subtask_generation.memory', course_id): for queryset in item_querysets: for item in queryset.values(*all_item_fields).iterator(): if len(items_for_task) == items_per_task and num_subtasks < total_num_subtasks - 1: yield items_for_task num_items_queued += items_per_task items_for_task = [] num_subtasks += 1 items_for_task.append(item) # yield remainder items for task, if any if items_for_task: yield items_for_task num_items_queued += len(items_for_task) # Note, depending on what kind of DB is used, it's possible for the queryset # we iterate over to change in the course of the query. Therefore it's # possible that there are more (or fewer) items queued than were initially # calculated. It also means it's possible that the last task contains # more items than items_per_task allows. We expect this to be a small enough # number as to be negligible. if num_items_queued != total_num_items: TASK_LOG.info("Number of items generated by chunking %s not equal to original total %s", num_items_queued, total_num_items) class SubtaskStatus(object): """ Create and return a dict for tracking the status of a subtask. SubtaskStatus values are: 'task_id' : id of subtask. This is used to pass task information across retries. 'attempted' : number of attempts -- should equal succeeded plus failed 'succeeded' : number that succeeded in processing 'skipped' : number that were not processed. 'failed' : number that failed during processing 'retried_nomax' : number of times the subtask has been retried for conditions that should not have a maximum count applied 'retried_withmax' : number of times the subtask has been retried for conditions that should have a maximum count applied 'state' : celery state of the subtask (e.g. 
QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS) Object is not JSON-serializable, so to_dict and from_dict methods are provided so that it can be passed as a serializable argument to tasks (and be reconstituted within such tasks). In future, we may want to include specific error information indicating the reason for failure. Also, we should count up "not attempted" separately from attempted/failed. """ def __init__(self, task_id, attempted=None, succeeded=0, failed=0, skipped=0, retried_nomax=0, retried_withmax=0, state=None): """Construct a SubtaskStatus object.""" self.task_id = task_id if attempted is not None: self.attempted = attempted else: self.attempted = succeeded + failed self.succeeded = succeeded self.failed = failed self.skipped = skipped self.retried_nomax = retried_nomax self.retried_withmax = retried_withmax self.state = state if state is not None else QUEUING @classmethod def from_dict(cls, d): """Construct a SubtaskStatus object from a dict representation.""" options = dict(d) task_id = options['task_id'] del options['task_id'] return SubtaskStatus.create(task_id, **options) @classmethod def create(cls, task_id, **options): """Construct a SubtaskStatus object.""" return cls(task_id, **options) def to_dict(self): """ Output a dict representation of a SubtaskStatus object. Use for creating a JSON-serializable representation for use by tasks. """ return self.__dict__ def increment(self, succeeded=0, failed=0, skipped=0, retried_nomax=0, retried_withmax=0, state=None): """ Update the result of a subtask with additional results. Kwarg arguments are incremented to the existing values. The exception is for `state`, which if specified is used to override the existing value. 
""" self.attempted += (succeeded + failed) self.succeeded += succeeded self.failed += failed self.skipped += skipped self.retried_nomax += retried_nomax self.retried_withmax += retried_withmax if state is not None: self.state = state def get_retry_count(self): """Returns the number of retries of any kind.""" return self.retried_nomax + self.retried_withmax def __repr__(self): """Return print representation of a SubtaskStatus object.""" return 'SubtaskStatus<%r>' % (self.to_dict(),) def __unicode__(self): """Return unicode version of a SubtaskStatus object representation.""" return unicode(repr(self)) def initialize_subtask_info(entry, action_name, total_num, subtask_id_list): """ Store initial subtask information to InstructorTask object. The InstructorTask's "task_output" field is initialized. This is a JSON-serialized dict. Counters for 'attempted', 'succeeded', 'failed', 'skipped' keys are initialized to zero, as is the 'duration_ms' value. A 'start_time' is stored for later duration calculations, and the total number of "things to do" is set, so the user can be told how much needs to be done overall. The `action_name` is also stored, to help with constructing more readable task_progress messages. The InstructorTask's "subtasks" field is also initialized. This is also a JSON-serialized dict. Keys include 'total', 'succeeded', 'retried', 'failed', which are counters for the number of subtasks. 'Total' is set here to the total number, while the other three are initialized to zero. Once the counters for 'succeeded' and 'failed' match the 'total', the subtasks are done and the InstructorTask's "status" will be changed to SUCCESS. The "subtasks" field also contains a 'status' key, that contains a dict that stores status information for each subtask. The value for each subtask (keyed by its task_id) is its subtask status, as defined by SubtaskStatus.to_dict(). This information needs to be set up in the InstructorTask before any of the subtasks start running. 
If not, there is a chance that the subtasks could complete before the parent task is done creating subtasks. Doing so also simplifies the save() here, as it avoids the need for locking. Monitoring code should assume that if an InstructorTask has subtask information, that it should rely on the status stored in the InstructorTask object, rather than status stored in the corresponding AsyncResult. """ task_progress = { 'action_name': action_name, 'attempted': 0, 'failed': 0, 'skipped': 0, 'succeeded': 0, 'total': total_num, 'duration_ms': int(0), 'start_time': time() } entry.task_output = InstructorTask.create_output_for_success(task_progress) entry.task_state = PROGRESS # Write out the subtasks information. num_subtasks = len(subtask_id_list) # Note that may not be necessary to store initial value with all those zeroes! # Write out as a dict, so it will go more smoothly into json. subtask_status = {subtask_id: (SubtaskStatus.create(subtask_id)).to_dict() for subtask_id in subtask_id_list} subtask_dict = { 'total': num_subtasks, 'succeeded': 0, 'failed': 0, 'status': subtask_status } entry.subtasks = json.dumps(subtask_dict) # and save the entry immediately, before any subtasks actually start work: entry.save_now() return task_progress # pylint: disable=bad-continuation def queue_subtasks_for_query( entry, action_name, create_subtask_fcn, item_querysets, item_fields, items_per_task, total_num_items, ): """ Generates and queues subtasks to each execute a chunk of "items" generated by a queryset. Arguments: `entry` : the InstructorTask object for which subtasks are being queued. `action_name` : a past-tense verb that can be used for constructing readable status messages. `create_subtask_fcn` : a function of two arguments that constructs the desired kind of subtask object. Arguments are the list of items to be processed by this subtask, and a SubtaskStatus object reflecting initial status (and containing the subtask's id). 
`item_querysets` : a list of query sets that define the "items" that should be passed to subtasks. `item_fields` : the fields that should be included in the dict that is returned. These are in addition to the 'pk' field. `items_per_task` : maximum size of chunks to break each query chunk into for use by a subtask. `total_num_items` : total amount of items that will be put into subtasks Returns: the task progress as stored in the InstructorTask object. """ task_id = entry.task_id # Calculate the number of tasks that will be created, and create a list of ids for each task. total_num_subtasks = _get_number_of_subtasks(total_num_items, items_per_task) subtask_id_list = [str(uuid4()) for _ in range(total_num_subtasks)] # Update the InstructorTask with information about the subtasks we've defined. TASK_LOG.info( "Task %s: updating InstructorTask %s with subtask info for %s subtasks to process %s items.", task_id, entry.id, total_num_subtasks, total_num_items, ) # Make sure this is committed to database before handing off subtasks to celery. with outer_atomic(): progress = initialize_subtask_info(entry, action_name, total_num_items, subtask_id_list) # Construct a generator that will return the recipients to use for each subtask. # Pass in the desired fields to fetch for each recipient. item_list_generator = _generate_items_for_subtask( item_querysets, item_fields, total_num_items, items_per_task, total_num_subtasks, entry.course_id, ) # Now create the subtasks, and start them running. TASK_LOG.info( "Task %s: creating %s subtasks to process %s items.", task_id, total_num_subtasks, total_num_items, ) num_subtasks = 0 for item_list in item_list_generator: subtask_id = subtask_id_list[num_subtasks] num_subtasks += 1 subtask_status = SubtaskStatus.create(subtask_id) new_subtask = create_subtask_fcn(item_list, subtask_status) new_subtask.apply_async() # Subtasks have been queued so no exceptions should be raised after this point. 
# Return the task progress as stored in the InstructorTask object. return progress def _acquire_subtask_lock(task_id): """ Mark the specified task_id as being in progress. This is used to make sure that the same task is not worked on by more than one worker at the same time. This can occur when tasks are requeued by Celery in response to loss of connection to the task broker. Most of the time, such duplicate tasks are run sequentially, but they can overlap in processing as well. Returns true if the task_id was not already locked; false if it was. """ # cache.add fails if the key already exists key = "subtask-{}".format(task_id) succeeded = cache.add(key, 'true', SUBTASK_LOCK_EXPIRE) if not succeeded: TASK_LOG.warning("task_id '%s': already locked. Contains value '%s'", task_id, cache.get(key)) return succeeded def _release_subtask_lock(task_id): """ Unmark the specified task_id as being no longer in progress. This is most important to permit a task to be retried. """ # According to Celery task cookbook, "Memcache delete is very slow, but we have # to use it to take advantage of using add() for atomic locking." key = "subtask-{}".format(task_id) cache.delete(key) def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status): """ Confirms that the current subtask is known to the InstructorTask and hasn't already been completed. Problems can occur when the parent task has been run twice, and results in duplicate subtasks being created for the same InstructorTask entry. This maybe happens when Celery loses its connection to its broker, and any current tasks get requeued. If a parent task gets requeued, then the same InstructorTask may have a different set of subtasks defined (to do the same thing), so the subtasks from the first queuing would not be known to the InstructorTask. We return an exception in this case. If a subtask gets requeued, then the first time the subtask runs it should run fine to completion. 
However, we want to prevent it from running again, so we check here to see what the existing subtask's status is. If it is complete, we raise an exception. We also take a lock on the task, so that we can detect if another worker has started work but has not yet completed that work. The other worker is allowed to finish, and this raises an exception. Raises a DuplicateTaskException exception if it's not a task that should be run. If this succeeds, it requires that update_subtask_status() is called to release the lock on the task. """ # Confirm that the InstructorTask actually defines subtasks. entry = InstructorTask.objects.get(pk=entry_id) if len(entry.subtasks) == 0: format_str = "Unexpected task_id '{}': unable to find subtasks of instructor task '{}': rejecting task {}" msg = format_str.format(current_task_id, entry, new_subtask_status) TASK_LOG.warning(msg) dog_stats_api.increment('instructor_task.subtask.duplicate.nosubtasks', tags=[entry.course_id]) raise DuplicateTaskException(msg) # Confirm that the InstructorTask knows about this particular subtask. subtask_dict = json.loads(entry.subtasks) subtask_status_info = subtask_dict['status'] if current_task_id not in subtask_status_info: format_str = "Unexpected task_id '{}': unable to find status for subtask of instructor task '{}': rejecting task {}" msg = format_str.format(current_task_id, entry, new_subtask_status) TASK_LOG.warning(msg) dog_stats_api.increment('instructor_task.subtask.duplicate.unknown', tags=[entry.course_id]) raise DuplicateTaskException(msg) # Confirm that the InstructorTask doesn't think that this subtask has already been # performed successfully. 
subtask_status = SubtaskStatus.from_dict(subtask_status_info[current_task_id]) subtask_state = subtask_status.state if subtask_state in READY_STATES: format_str = "Unexpected task_id '{}': already completed - status {} for subtask of instructor task '{}': rejecting task {}" msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status) TASK_LOG.warning(msg) dog_stats_api.increment('instructor_task.subtask.duplicate.completed', tags=[entry.course_id]) raise DuplicateTaskException(msg) # Confirm that the InstructorTask doesn't think that this subtask is already being # retried by another task. if subtask_state == RETRY: # Check to see if the input number of retries is less than the recorded number. # If so, then this is an earlier version of the task, and a duplicate. new_retry_count = new_subtask_status.get_retry_count() current_retry_count = subtask_status.get_retry_count() if new_retry_count < current_retry_count: format_str = "Unexpected task_id '{}': already retried - status {} for subtask of instructor task '{}': rejecting task {}" msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status) TASK_LOG.warning(msg) dog_stats_api.increment('instructor_task.subtask.duplicate.retried', tags=[entry.course_id]) raise DuplicateTaskException(msg) # Now we are ready to start working on this. Try to lock it. # If it fails, then it means that another worker is already in the # middle of working on this. if not _acquire_subtask_lock(current_task_id): format_str = "Unexpected task_id '{}': already being executed - for subtask of instructor task '{}'" msg = format_str.format(current_task_id, entry) TASK_LOG.warning(msg) dog_stats_api.increment('instructor_task.subtask.duplicate.locked', tags=[entry.course_id]) raise DuplicateTaskException(msg) def update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count=0): """ Update the status of the subtask in the parent InstructorTask object tracking its progress. 
Because select_for_update is used to lock the InstructorTask object while it is being updated, multiple subtasks updating at the same time may time out while waiting for the lock. The actual update operation is surrounded by a try/except/else that permits the update to be retried if the transaction times out. The subtask lock acquired in the call to check_subtask_is_valid() is released here, only when the attempting of retries has concluded. """ try: _update_subtask_status(entry_id, current_task_id, new_subtask_status) except DatabaseError: # If we fail, try again recursively. retry_count += 1 if retry_count < MAX_DATABASE_LOCK_RETRIES: TASK_LOG.info("Retrying to update status for subtask %s of instructor task %d with status %s: retry %d", current_task_id, entry_id, new_subtask_status, retry_count) dog_stats_api.increment('instructor_task.subtask.retry_after_failed_update') update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count) else: TASK_LOG.info("Failed to update status after %d retries for subtask %s of instructor task %d with status %s", retry_count, current_task_id, entry_id, new_subtask_status) dog_stats_api.increment('instructor_task.subtask.failed_after_update_retries') raise finally: # Only release the lock on the subtask when we're done trying to update it. # Note that this will be called each time a recursive call to update_subtask_status() # returns. Fortunately, it's okay to release a lock that has already been released. _release_subtask_lock(current_task_id) @transaction.atomic def _update_subtask_status(entry_id, current_task_id, new_subtask_status): """ Update the status of the subtask in the parent InstructorTask object tracking its progress. Uses select_for_update to lock the InstructorTask object while it is being updated. The operation is surrounded by a try/except/else that permit the manual transaction to be committed on completion, or rolled back on error. The InstructorTask's "task_output" field is updated. 
This is a JSON-serialized dict. Accumulates values for 'attempted', 'succeeded', 'failed', 'skipped' from `new_subtask_status` into the corresponding values in the InstructorTask's task_output. Also updates the 'duration_ms' value with the current interval since the original InstructorTask started. Note that this value is only approximate, since the subtask may be running on a different server than the original task, so is subject to clock skew. The InstructorTask's "subtasks" field is also updated. This is also a JSON-serialized dict. Keys include 'total', 'succeeded', 'retried', 'failed', which are counters for the number of subtasks. 'Total' is expected to have been set at the time the subtasks were created. The other three counters are incremented depending on the value of `status`. Once the counters for 'succeeded' and 'failed' match the 'total', the subtasks are done and the InstructorTask's "status" is changed to SUCCESS. The "subtasks" field also contains a 'status' key, that contains a dict that stores status information for each subtask. At the moment, the value for each subtask (keyed by its task_id) is the value of the SubtaskStatus.to_dict(), but could be expanded in future to store information about failure messages, progress made, etc. """ TASK_LOG.info("Preparing to update status for subtask %s for instructor task %d with status %s", current_task_id, entry_id, new_subtask_status) try: entry = InstructorTask.objects.select_for_update().get(pk=entry_id) subtask_dict = json.loads(entry.subtasks) subtask_status_info = subtask_dict['status'] if current_task_id not in subtask_status_info: # unexpected error -- raise an exception format_str = "Unexpected task_id '{}': unable to update status for subtask of instructor task '{}'" msg = format_str.format(current_task_id, entry_id) TASK_LOG.warning(msg) raise ValueError(msg) # Update status: subtask_status_info[current_task_id] = new_subtask_status.to_dict() # Update the parent task progress. 
# Set the estimate of duration, but only if it # increases. Clock skew between time() returned by different machines # may result in non-monotonic values for duration. task_progress = json.loads(entry.task_output) start_time = task_progress['start_time'] prev_duration = task_progress['duration_ms'] new_duration = int((time() - start_time) * 1000) task_progress['duration_ms'] = max(prev_duration, new_duration) # Update counts only when subtask is done. # In future, we can make this more responsive by updating status # between retries, by comparing counts that change from previous # retry. new_state = new_subtask_status.state if new_subtask_status is not None and new_state in READY_STATES: for statname in ['attempted', 'succeeded', 'failed', 'skipped']: task_progress[statname] += getattr(new_subtask_status, statname) # Figure out if we're actually done (i.e. this is the last task to complete). # This is easier if we just maintain a counter, rather than scanning the # entire new_subtask_status dict. if new_state == SUCCESS: subtask_dict['succeeded'] += 1 elif new_state in READY_STATES: subtask_dict['failed'] += 1 num_remaining = subtask_dict['total'] - subtask_dict['succeeded'] - subtask_dict['failed'] # If we're done with the last task, update the parent status to indicate that. # At present, we mark the task as having succeeded. In future, we should see # if there was a catastrophic failure that occurred, and figure out how to # report that here. if num_remaining <= 0: entry.task_state = SUCCESS entry.subtasks = json.dumps(subtask_dict) entry.task_output = InstructorTask.create_output_for_success(task_progress) TASK_LOG.debug("about to save....") entry.save() TASK_LOG.info("Task output updated to %s for subtask %s of instructor task %d", entry.task_output, current_task_id, entry_id) except Exception: TASK_LOG.exception("Unexpected error while updating InstructorTask.") dog_stats_api.increment('instructor_task.subtask.update_exception') raise
agpl-3.0
swinghu/zulip
zerver/lib/response.py
124
1316
from __future__ import absolute_import from django.http import HttpResponse, HttpResponseNotAllowed import ujson class HttpResponseUnauthorized(HttpResponse): status_code = 401 def __init__(self, realm): HttpResponse.__init__(self) self["WWW-Authenticate"] = 'Basic realm="%s"' % (realm,) def json_unauthorized(message): resp = HttpResponseUnauthorized("zulip") resp.content = ujson.dumps({"result": "error", "msg": message}) + "\n" return resp def json_method_not_allowed(methods): resp = HttpResponseNotAllowed(methods) resp.content = ujson.dumps({"result": "error", "msg": "Method Not Allowed", "allowed_methods": methods}) return resp def json_response(res_type="success", msg="", data={}, status=200): content = {"result": res_type, "msg": msg} content.update(data) return HttpResponse(content=ujson.dumps(content) + "\n", content_type='application/json', status=status) def json_success(data={}): return json_response(data=data) def json_error(msg, data={}, status=400): return json_response(res_type="error", msg=msg, data=data, status=status) def json_unhandled_exception(): return json_response(res_type="error", msg="Internal server error", status=500)
apache-2.0
cloudnull/rpc-maas
playbooks/files/rax-maas/rally/plugins/swift_customer_container_object_scenario.py
4
1092
import tempfile from rally import consts from rally.plugins.openstack import scenario from rally.plugins.openstack.scenarios.swift import utils from rally.task import atomic from rally.task import validation """Scenarios for Swift Objects.""" @validation.required_services(consts.Service.SWIFT) @validation.required_openstack(users=True) @scenario.configure(context={"cleanup": ["swift"]}, name="SwiftObjects.create_c_and_o" "_then_download_and_delete_all") class CreateContainerAndObjectThenDownloadAndDeleteAll(utils.SwiftScenario): def run(self, object_size=1024, **kwargs): container_name = None container_name = self._create_container(**kwargs) with tempfile.TemporaryFile() as dummy_file: dummy_file.truncate(object_size) dummy_file.seek(0) object_name = self._upload_object(container_name, dummy_file)[1] self._download_object(container_name, object_name) self._delete_object(container_name, object_name) self._delete_container(container_name)
apache-2.0
NcLang/vimrc
sources_non_forked/YouCompleteMe/third_party/ycmd/third_party/JediHTTP/jedihttp/hmaclib.py
5
3167
# Copyright 2015 Cedraro Andrea <a.cedraro@gmail.com> # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import hmac import hashlib import tempfile from base64 import b64encode, b64decode from jedihttp.compatibility import encode_string, decode_string, compare_digest def TemporaryHmacSecretFile( secret ): """Helper function for passing the hmac secret when starting a JediHTTP server with TemporaryHmacSecretFile( 'mysecret' ) as hmac_file: jedihttp = subprocess.Popen( ['python', 'jedihttp', '--hmac-file-secret', hmac_file.name ] ) The JediHTTP Server as soon as it reads the hmac secret will remove the file """ hmac_file = tempfile.NamedTemporaryFile( 'w', delete = False ) encoded_secret = decode_string( b64encode( encode_string( secret ) ) ) json.dump( { 'hmac_secret': encoded_secret }, hmac_file ) return hmac_file _HMAC_HEADER = 'x-jedihttp-hmac' class JediHTTPHmacHelper( object ): """Helper class to correctly signing requests and validating responses when communicating with a JediHTTP server.""" def __init__( self, secret ): self._secret = encode_string( secret ) def _HasHeader( self, headers ): return _HMAC_HEADER in headers def _SetHmacHeader( self, headers, hmac ): headers[ _HMAC_HEADER ] = decode_string( b64encode( hmac ) ) def _GetHmacHeader( self, headers ): return b64decode( headers[ _HMAC_HEADER ] ) def _Hmac( self, content ): return hmac.new( self._secret, msg = encode_string( content ), digestmod = hashlib.sha256 ).digest() def _ComputeRequestHmac( self, method, 
path, body ): if not body: body = '' return self._Hmac( b''.join( ( self._Hmac( method ), self._Hmac( path ), self._Hmac( body ) ) ) ) def SignRequestHeaders( self, headers, method, path, body ): self._SetHmacHeader( headers, self._ComputeRequestHmac( method, path, body ) ) def IsRequestAuthenticated( self, headers, method, path, body ): if not self._HasHeader( headers ): return False return compare_digest( self._GetHmacHeader( headers ), self._ComputeRequestHmac( method, path, body ) ) def SignResponseHeaders( self, headers, body ): self._SetHmacHeader( headers, self._Hmac( body ) ) def IsResponseAuthenticated( self, headers, content ): if not self._HasHeader( headers ): return False return compare_digest( self._GetHmacHeader( headers ), self._Hmac( content ) )
mit
ruchee/vimrc
vimfiles/bundle/vim-python/submodules/pydocstyle/src/tests/test_cases/canonical_numpy_examples.py
3
5315
"""This is the docstring for the example.py module. Modules names should have short, all-lowercase names. The module name may have underscores if this improves readability. Every module should have a docstring at the very top of the file. The module's docstring may extend over multiple lines. If your docstring does extend over multiple lines, the closing three quotation marks must be on a line by itself, preferably preceded by a blank line. """ # Example source file from the official "numpydoc docstring guide" # documentation (with the modification of commenting out all the original # ``import`` lines, plus adding this note and ``Expectation`` code): # * As HTML: https://numpydoc.readthedocs.io/en/latest/example.html # * Source Python: # https://github.com/numpy/numpydoc/blob/master/doc/example.py # from __future__ import division, absolute_import, print_function # # import os # standard library imports first # # Do NOT import using *, e.g. from numpy import * # # Import the module using # # import numpy # # instead or import individual functions as needed, e.g # # from numpy import array, zeros # # If you prefer the use of abbreviated module names, we suggest the # convention used by NumPy itself:: # # import numpy as np # import matplotlib as mpl # import matplotlib.pyplot as plt # # These abbreviated names are not to be used in docstrings; users must # be able to paste and execute docstrings after importing only the # numpy module itself, unabbreviated. 
import os from .expected import Expectation expectation = Expectation() expect = expectation.expect # module docstring expected violations: expectation.expected.add(( os.path.normcase(__file__), "D205: 1 blank line required between summary line and description " "(found 0)")) expectation.expected.add(( os.path.normcase(__file__), "D213: Multi-line docstring summary should start at the second line")) expectation.expected.add(( os.path.normcase(__file__), "D400: First line should end with a period (not 'd')")) expectation.expected.add(( os.path.normcase(__file__), "D404: First word of the docstring should not be `This`")) expectation.expected.add(( os.path.normcase(__file__), "D415: First line should end with a period, question mark, or exclamation " "point (not 'd')")) @expect("D213: Multi-line docstring summary should start at the second line", arg_count=3) @expect("D401: First line should be in imperative mood; try rephrasing " "(found 'A')", arg_count=3) @expect("D413: Missing blank line after last section ('Examples')", arg_count=3) def foo(var1, var2, long_var_name='hi'): r"""A one-line summary that does not use variable names. Several sentences providing an extended description. Refer to variables using back-ticks, e.g. `var`. Parameters ---------- var1 : array_like Array_like means all those objects -- lists, nested lists, etc. -- that can be converted to an array. We can also refer to variables like `var1`. var2 : int The type above can either refer to an actual Python type (e.g. ``int``), or describe the type of the variable in more detail, e.g. ``(N,) ndarray`` or ``array_like``. long_var_name : {'hi', 'ho'}, optional Choices in brackets, default first when optional. Returns ------- type Explanation of anonymous return value of type ``type``. describe : type Explanation of return value named `describe`. out : type Explanation of `out`. 
type_without_description Other Parameters ---------------- only_seldom_used_keywords : type Explanation common_parameters_listed_above : type Explanation Raises ------ BadException Because you shouldn't have done that. See Also -------- numpy.array : Relationship (optional). numpy.ndarray : Relationship (optional), which could be fairly long, in which case the line wraps here. numpy.dot, numpy.linalg.norm, numpy.eye Notes ----- Notes about the implementation algorithm (if needed). This can have multiple paragraphs. You may include some math: .. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n} And even use a Greek symbol like :math:`\omega` inline. References ---------- Cite the relevant literature, e.g. [1]_. You may also cite these references in the notes section above. .. [1] O. McNoleg, "The integration of GIS, remote sensing, expert systems and adaptive co-kriging for environmental habitat modelling of the Highland Haggis using object-oriented, fuzzy-logic and neural-network techniques," Computers & Geosciences, vol. 22, pp. 585-588, 1996. Examples -------- These are written in doctest format, and should illustrate how to use the function. >>> a = [1, 2, 3] >>> print([x + 3 for x in a]) [4, 5, 6] >>> print("a\nb") a b """ # After closing class docstring, there should be one blank line to # separate following codes (according to PEP257). # But for function, method and module, there should be no blank lines # after closing the docstring. pass
mit
perzizzle/ansible-modules-extras
windows/win_chocolatey.py
78
3019
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2014, Trond Hindenes <trond@hindenes.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name DOCUMENTATION = ''' --- module: win_chocolatey version_added: "1.9" short_description: Installs packages using chocolatey description: - Installs packages using Chocolatey (http://chocolatey.org/). If Chocolatey is missing from the system, the module will install it. List of packages can be found at http://chocolatey.org/packages options: name: description: - Name of the package to be installed required: true default: null aliases: [] state: description: - State of the package on the system required: false choices: - present - absent default: present aliases: [] force: description: - Forces install of the package (even if it already exists). 
Using Force will cause ansible to always report that a change was made required: false choices: - yes - no default: no aliases: [] upgrade: description: - If package is already installed it, try to upgrade to the latest version or to the specified version required: false choices: - yes - no default: no aliases: [] version: description: - Specific version of the package to be installed - Ignored when state == 'absent' required: false default: null aliases: [] source: description: - Specify source rather than using default chocolatey repository require: false default: null aliases: [] author: "Trond Hindenes (@trondhindenes), Peter Mounce (@petemounce), Pepe Barbe (@elventear), Adam Keech (@smadam813)" ''' # TODO: # * Better parsing when a package has dependencies - currently fails # * Time each item that is run # * Support 'changed' with gems - would require shelling out to `gem list` first and parsing, kinda defeating the point of using chocolatey. EXAMPLES = ''' # Install git win_chocolatey: name: git # Install notepadplusplus version 6.6 win_chocolatey: name: notepadplusplus.install version: 6.6 # Uninstall git win_chocolatey: name: git state: absent # Install git from specified repository win_chocolatey: name: git source: https://someserver/api/v2/ '''
gpl-3.0
Scheirle/pelican
setup.py
2
2712
#!/usr/bin/env python from io import open from os import walk from os.path import join, relpath import sys from setuptools import setup requires = ['feedgenerator >= 1.9', 'jinja2 >= 2.7', 'pygments', 'docutils', 'pytz >= 0a', 'blinker', 'unidecode', 'six >= 1.4', 'python-dateutil'] entry_points = { 'console_scripts': [ 'pelican = pelican:main', 'pelican-import = pelican.tools.pelican_import:main', 'pelican-quickstart = pelican.tools.pelican_quickstart:main', 'pelican-themes = pelican.tools.pelican_themes:main' ] } README = open('README.rst', encoding='utf-8').read() CHANGELOG = open('docs/changelog.rst', encoding='utf-8').read() description = u'\n'.join([README, CHANGELOG]) if sys.version_info.major < 3: description = description.encode('utf-8') setup( name='pelican', version='3.7.2.dev0', url='http://getpelican.com/', author='Alexis Metaireau', maintainer='Justin Mayer', author_email='authors@getpelican.com', description="Static site generator supporting reStructuredText and " "Markdown source content.", long_description=description, packages=['pelican', 'pelican.tools'], package_data={ # we manually collect the package data, as opposed to using, # include_package_data=True because we don't want the tests to be # included automatically as package data (MANIFEST.in is too greedy) 'pelican': [relpath(join(root, name), 'pelican') for root, _, names in walk(join('pelican', 'themes')) for name in names], 'pelican.tools': [relpath(join(root, name), join('pelican', 'tools')) for root, _, names in walk(join('pelican', 'tools', 'templates')) for name in names], }, install_requires=requires, entry_points=entry_points, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'License :: OSI Approved :: GNU Affero General Public License v3', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 
'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Software Development :: Libraries :: Python Modules', ], test_suite='pelican.tests', )
agpl-3.0
ivandevp/django
tests/basic/tests.py
97
29877
from __future__ import unicode_literals import threading import warnings from datetime import datetime, timedelta from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections from django.db.models.fields import Field from django.db.models.fields.related import ForeignObjectRel from django.db.models.manager import BaseManager from django.db.models.query import EmptyQuerySet, QuerySet from django.test import ( SimpleTestCase, TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature, ) from django.utils import six from django.utils.translation import ugettext_lazy from .models import Article, ArticleSelectOnSave, SelfRef class ModelInstanceCreationTests(TestCase): def test_object_is_not_written_to_database_until_save_was_called(self): a = Article( id=None, headline='Parrot programs in Python', pub_date=datetime(2005, 7, 28), ) self.assertIsNone(a.id) self.assertEqual(Article.objects.all().count(), 0) # Save it into the database. You have to call save() explicitly. a.save() self.assertIsNotNone(a.id) self.assertEqual(Article.objects.all().count(), 1) def test_can_initialize_model_instance_using_positional_arguments(self): """ You can initialize a model instance using positional arguments, which should match the field order as defined in the model. 
""" a = Article(None, 'Second article', datetime(2005, 7, 29)) a.save() self.assertEqual(a.headline, 'Second article') self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0)) def test_can_create_instance_using_kwargs(self): a = Article( id=None, headline='Third article', pub_date=datetime(2005, 7, 30), ) a.save() self.assertEqual(a.headline, 'Third article') self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0)) def test_autofields_generate_different_values_for_each_instance(self): a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0)) a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0)) a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0)) self.assertNotEqual(a3.id, a1.id) self.assertNotEqual(a3.id, a2.id) def test_can_mix_and_match_position_and_kwargs(self): # You can also mix and match position and keyword arguments, but # be sure not to duplicate field information. a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31)) a.save() self.assertEqual(a.headline, 'Fourth article') def test_cannot_create_instance_with_invalid_kwargs(self): six.assertRaisesRegex( self, TypeError, "'foo' is an invalid keyword argument for this function", Article, id=None, headline='Some headline', pub_date=datetime(2005, 7, 31), foo='bar', ) def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self): """ You can leave off the value for an AutoField when creating an object, because it'll get filled in automatically when you save(). 
""" a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31)) a.save() self.assertEqual(a.headline, 'Article 5') self.assertNotEqual(a.id, None) def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self): a = Article(pub_date=datetime(2005, 7, 31)) a.save() self.assertEqual(a.headline, 'Default headline') def test_for_datetimefields_saves_as_much_precision_as_was_given(self): """as much precision in *seconds*""" a1 = Article( headline='Article 7', pub_date=datetime(2005, 7, 31, 12, 30), ) a1.save() self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date, datetime(2005, 7, 31, 12, 30)) a2 = Article( headline='Article 8', pub_date=datetime(2005, 7, 31, 12, 30, 45), ) a2.save() self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date, datetime(2005, 7, 31, 12, 30, 45)) def test_saving_an_object_again_does_not_create_a_new_object(self): a = Article(headline='original', pub_date=datetime(2014, 5, 16)) a.save() current_id = a.id a.save() self.assertEqual(a.id, current_id) a.headline = 'Updated headline' a.save() self.assertEqual(a.id, current_id) def test_querysets_checking_for_membership(self): headlines = [ 'Parrot programs in Python', 'Second article', 'Third article'] some_pub_date = datetime(2014, 5, 16, 12, 1) for headline in headlines: Article(headline=headline, pub_date=some_pub_date).save() a = Article(headline='Some headline', pub_date=some_pub_date) a.save() # You can use 'in' to test for membership... self.assertIn(a, Article.objects.all()) # ... 
but there will often be more efficient ways if that is all you need: self.assertTrue(Article.objects.filter(id=a.id).exists()) class ModelTest(TestCase): def test_objects_attribute_is_only_available_on_the_class_itself(self): six.assertRaisesRegex( self, AttributeError, "Manager isn't accessible via Article instances", getattr, Article(), "objects", ) self.assertFalse(hasattr(Article(), 'objects')) self.assertTrue(hasattr(Article, 'objects')) def test_queryset_delete_removes_all_items_in_that_queryset(self): headlines = [ 'An article', 'Article One', 'Amazing article', 'Boring article'] some_pub_date = datetime(2014, 5, 16, 12, 1) for headline in headlines: Article(headline=headline, pub_date=some_pub_date).save() self.assertQuerysetEqual(Article.objects.all().order_by('headline'), ["<Article: Amazing article>", "<Article: An article>", "<Article: Article One>", "<Article: Boring article>"]) Article.objects.filter(headline__startswith='A').delete() self.assertQuerysetEqual(Article.objects.all().order_by('headline'), ["<Article: Boring article>"]) def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self): some_pub_date = datetime(2014, 5, 16, 12, 1) a1 = Article.objects.create(headline='First', pub_date=some_pub_date) a2 = Article.objects.create(headline='Second', pub_date=some_pub_date) self.assertNotEqual(a1, a2) self.assertEqual(a1, Article.objects.get(id__exact=a1.id)) self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id)) @skipUnlessDBFeature('supports_microsecond_precision') def test_microsecond_precision(self): # In PostgreSQL, microsecond-level precision is available. 
a9 = Article( headline='Article 9', pub_date=datetime(2005, 7, 31, 12, 30, 45, 180), ) a9.save() self.assertEqual(Article.objects.get(pk=a9.pk).pub_date, datetime(2005, 7, 31, 12, 30, 45, 180)) @skipIfDBFeature('supports_microsecond_precision') def test_microsecond_precision_not_supported(self): # In MySQL, microsecond-level precision isn't always available. You'll # lose microsecond-level precision once the data is saved. a9 = Article( headline='Article 9', pub_date=datetime(2005, 7, 31, 12, 30, 45, 180), ) a9.save() self.assertEqual( Article.objects.get(id__exact=a9.id).pub_date, datetime(2005, 7, 31, 12, 30, 45), ) @skipIfDBFeature('supports_microsecond_precision') def test_microsecond_precision_not_supported_edge_case(self): # In MySQL, microsecond-level precision isn't always available. You'll # lose microsecond-level precision once the data is saved. a = Article.objects.create( headline='Article', pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999), ) self.assertEqual( Article.objects.get(pk=a.pk).pub_date, datetime(2008, 12, 31, 23, 59, 59), ) def test_manually_specify_primary_key(self): # You can manually specify the primary key when creating a new object. a101 = Article( id=101, headline='Article 101', pub_date=datetime(2005, 7, 31, 12, 30, 45), ) a101.save() a101 = Article.objects.get(pk=101) self.assertEqual(a101.headline, 'Article 101') def test_create_method(self): # You can create saved objects in a single step a10 = Article.objects.create( headline="Article 10", pub_date=datetime(2005, 7, 31, 12, 30, 45), ) self.assertEqual(Article.objects.get(headline="Article 10"), a10) def test_year_lookup_edge_case(self): # Edge-case test: A year lookup should retrieve all objects in # the given year, including Jan. 1 and Dec. 31. 
Article.objects.create( headline='Article 11', pub_date=datetime(2008, 1, 1), ) Article.objects.create( headline='Article 12', pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999), ) self.assertQuerysetEqual(Article.objects.filter(pub_date__year=2008), ["<Article: Article 11>", "<Article: Article 12>"]) def test_unicode_data(self): # Unicode data works, too. a = Article( headline='\u6797\u539f \u3081\u3050\u307f', pub_date=datetime(2005, 7, 28), ) a.save() self.assertEqual(Article.objects.get(pk=a.id).headline, '\u6797\u539f \u3081\u3050\u307f') def test_hash_function(self): # Model instances have a hash function, so they can be used in sets # or as dictionary keys. Two models compare as equal if their primary # keys are equal. a10 = Article.objects.create( headline="Article 10", pub_date=datetime(2005, 7, 31, 12, 30, 45), ) a11 = Article.objects.create( headline='Article 11', pub_date=datetime(2008, 1, 1), ) a12 = Article.objects.create( headline='Article 12', pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999), ) s = {a10, a11, a12} self.assertIn(Article.objects.get(headline='Article 11'), s) def test_field_ordering(self): """ Field instances have a `__lt__` comparison function to define an ordering based on their creation. Prior to #17851 this ordering comparison relied on the now unsupported `__cmp__` and was assuming compared objects were both Field instances raising `AttributeError` when it should have returned `NotImplemented`. """ f1 = Field() f2 = Field(auto_created=True) f3 = Field() self.assertLess(f2, f1) self.assertGreater(f3, f1) self.assertIsNotNone(f1) self.assertNotIn(f2, (None, 1, '')) def test_extra_method_select_argument_with_dashes_and_values(self): # The 'select' argument to extra() supports names with dashes in # them, as long as you use values(). 
Article.objects.create( headline="Article 10", pub_date=datetime(2005, 7, 31, 12, 30, 45), ) Article.objects.create( headline='Article 11', pub_date=datetime(2008, 1, 1), ) Article.objects.create( headline='Article 12', pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999), ) dicts = Article.objects.filter( pub_date__year=2008).extra( select={'dashed-value': '1'}).values('headline', 'dashed-value') self.assertEqual([sorted(d.items()) for d in dicts], [[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]]) def test_extra_method_select_argument_with_dashes(self): # If you use 'select' with extra() and names containing dashes on a # query that's *not* a values() query, those extra 'select' values # will silently be ignored. Article.objects.create( headline="Article 10", pub_date=datetime(2005, 7, 31, 12, 30, 45), ) Article.objects.create( headline='Article 11', pub_date=datetime(2008, 1, 1), ) Article.objects.create( headline='Article 12', pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999), ) articles = Article.objects.filter( pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'}) self.assertEqual(articles[0].undashedvalue, 2) def test_create_relation_with_ugettext_lazy(self): """ Test that ugettext_lazy objects work when saving model instances through various methods. Refs #10498. 
""" notlazy = 'test' lazy = ugettext_lazy(notlazy) Article.objects.create(headline=lazy, pub_date=datetime.now()) article = Article.objects.get() self.assertEqual(article.headline, notlazy) # test that assign + save works with Promise objects article.headline = lazy article.save() self.assertEqual(article.headline, notlazy) # test .update() Article.objects.update(headline=lazy) article = Article.objects.get() self.assertEqual(article.headline, notlazy) # still test bulk_create() Article.objects.all().delete() Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())]) article = Article.objects.get() self.assertEqual(article.headline, notlazy) def test_emptyqs(self): # Can't be instantiated with self.assertRaises(TypeError): EmptyQuerySet() self.assertIsInstance(Article.objects.none(), EmptyQuerySet) def test_emptyqs_values(self): # test for #15959 Article.objects.create(headline='foo', pub_date=datetime.now()) with self.assertNumQueries(0): qs = Article.objects.none().values_list('pk') self.assertIsInstance(qs, EmptyQuerySet) self.assertEqual(len(qs), 0) def test_emptyqs_customqs(self): # A hacky test for custom QuerySet subclass - refs #17271 Article.objects.create(headline='foo', pub_date=datetime.now()) class CustomQuerySet(QuerySet): def do_something(self): return 'did something' qs = Article.objects.all() qs.__class__ = CustomQuerySet qs = qs.none() with self.assertNumQueries(0): self.assertEqual(len(qs), 0) self.assertIsInstance(qs, EmptyQuerySet) self.assertEqual(qs.do_something(), 'did something') def test_emptyqs_values_order(self): # Tests for ticket #17712 Article.objects.create(headline='foo', pub_date=datetime.now()) with self.assertNumQueries(0): self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0) with self.assertNumQueries(0): self.assertEqual(len(Article.objects.none().filter( id__in=Article.objects.values_list('id', flat=True))), 0) @skipUnlessDBFeature('can_distinct_on_fields') def 
test_emptyqs_distinct(self): # Tests for #19426 Article.objects.create(headline='foo', pub_date=datetime.now()) with self.assertNumQueries(0): self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0) def test_ticket_20278(self): sr = SelfRef.objects.create() with self.assertRaises(ObjectDoesNotExist): SelfRef.objects.get(selfref=sr) def test_eq(self): self.assertEqual(Article(id=1), Article(id=1)) self.assertNotEqual(Article(id=1), object()) self.assertNotEqual(object(), Article(id=1)) a = Article() self.assertEqual(a, a) self.assertNotEqual(Article(), a) def test_hash(self): # Value based on PK self.assertEqual(hash(Article(id=1)), hash(1)) with self.assertRaises(TypeError): # No PK value -> unhashable (because save() would then change # hash) hash(Article()) class ModelLookupTest(TestCase): def setUp(self): # Create an Article. self.a = Article( id=None, headline='Swallow programs in Python', pub_date=datetime(2005, 7, 28), ) # Save it into the database. You have to call save() explicitly. self.a.save() def test_all_lookup(self): # Change values by changing the attributes, then calling save(). self.a.headline = 'Parrot programs in Python' self.a.save() # Article.objects.all() returns all the articles in the database. self.assertQuerysetEqual(Article.objects.all(), ['<Article: Parrot programs in Python>']) def test_rich_lookup(self): # Django provides a rich database lookup API. self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a) self.assertEqual(Article.objects.get(headline__startswith='Swallow'), self.a) self.assertEqual(Article.objects.get(pub_date__year=2005), self.a) self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a) self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a) self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a) def test_equal_lookup(self): # The "__exact" lookup type can be omitted, as a shortcut. 
self.assertEqual(Article.objects.get(id=self.a.id), self.a) self.assertEqual(Article.objects.get(headline='Swallow programs in Python'), self.a) self.assertQuerysetEqual( Article.objects.filter(pub_date__year=2005), ['<Article: Swallow programs in Python>'], ) self.assertQuerysetEqual( Article.objects.filter(pub_date__year=2004), [], ) self.assertQuerysetEqual( Article.objects.filter(pub_date__year=2005, pub_date__month=7), ['<Article: Swallow programs in Python>'], ) self.assertQuerysetEqual( Article.objects.filter(pub_date__week_day=5), ['<Article: Swallow programs in Python>'], ) self.assertQuerysetEqual( Article.objects.filter(pub_date__week_day=6), [], ) def test_does_not_exist(self): # Django raises an Article.DoesNotExist exception for get() if the # parameters don't match any object. six.assertRaisesRegex( self, ObjectDoesNotExist, "Article matching query does not exist.", Article.objects.get, id__exact=2000, ) # To avoid dict-ordering related errors check only one lookup # in single assert. self.assertRaises( ObjectDoesNotExist, Article.objects.get, pub_date__year=2005, pub_date__month=8, ) six.assertRaisesRegex( self, ObjectDoesNotExist, "Article matching query does not exist.", Article.objects.get, pub_date__week_day=6, ) def test_lookup_by_primary_key(self): # Lookup by a primary key is the most common case, so Django # provides a shortcut for primary-key exact lookups. # The following is identical to articles.get(id=a.id). self.assertEqual(Article.objects.get(pk=self.a.id), self.a) # pk can be used as a shortcut for the primary key name in any query. self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]), ["<Article: Swallow programs in Python>"]) # Model instances of the same type and same ID are considered equal. 
a = Article.objects.get(pk=self.a.id) b = Article.objects.get(pk=self.a.id) self.assertEqual(a, b) def test_too_many(self): # Create a very similar object a = Article( id=None, headline='Swallow bites Python', pub_date=datetime(2005, 7, 28), ) a.save() self.assertEqual(Article.objects.count(), 2) # Django raises an Article.MultipleObjectsReturned exception if the # lookup matches more than one object six.assertRaisesRegex( self, MultipleObjectsReturned, "get\(\) returned more than one Article -- it returned 2!", Article.objects.get, headline__startswith='Swallow', ) six.assertRaisesRegex( self, MultipleObjectsReturned, "get\(\) returned more than one Article -- it returned 2!", Article.objects.get, pub_date__year=2005, ) six.assertRaisesRegex( self, MultipleObjectsReturned, "get\(\) returned more than one Article -- it returned 2!", Article.objects.get, pub_date__year=2005, pub_date__month=7, ) class ConcurrentSaveTests(TransactionTestCase): available_apps = ['basic'] @skipUnlessDBFeature('test_db_allows_multiple_connections') def test_concurrent_delete_with_save(self): """ Test fetching, deleting and finally saving an object - we should get an insert in this case. """ a = Article.objects.create(headline='foo', pub_date=datetime.now()) exceptions = [] def deleter(): try: # Do not delete a directly - doing so alters its state. 
Article.objects.filter(pk=a.pk).delete() except Exception as e: exceptions.append(e) finally: connections[DEFAULT_DB_ALIAS].close() self.assertEqual(len(exceptions), 0) t = threading.Thread(target=deleter) t.start() t.join() a.save() self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo') class ManagerTest(SimpleTestCase): QUERYSET_PROXY_METHODS = [ 'none', 'count', 'dates', 'datetimes', 'distinct', 'extra', 'get', 'get_or_create', 'update_or_create', 'create', 'bulk_create', 'filter', 'aggregate', 'annotate', 'complex_filter', 'exclude', 'in_bulk', 'iterator', 'earliest', 'latest', 'first', 'last', 'order_by', 'select_for_update', 'select_related', 'prefetch_related', 'values', 'values_list', 'update', 'reverse', 'defer', 'only', 'using', 'exists', '_insert', '_update', 'raw', ] def test_manager_methods(self): """ This test ensures that the correct set of methods from `QuerySet` are copied onto `Manager`. It's particularly useful to prevent accidentally leaking new methods into `Manager`. New `QuerySet` methods that should also be copied onto `Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`. """ self.assertEqual( sorted(BaseManager._get_queryset_methods(QuerySet).keys()), sorted(self.QUERYSET_PROXY_METHODS), ) class SelectOnSaveTests(TestCase): def test_select_on_save(self): a1 = Article.objects.create(pub_date=datetime.now()) with self.assertNumQueries(1): a1.save() asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now()) with self.assertNumQueries(2): asos.save() with self.assertNumQueries(1): asos.save(force_update=True) Article.objects.all().delete() with self.assertRaises(DatabaseError): with self.assertNumQueries(1): asos.save(force_update=True) def test_select_on_save_lying_update(self): """ Test that select_on_save works correctly if the database doesn't return correct information about matched rows from UPDATE. """ # Change the manager to not return "row matched" for update(). 
# We are going to change the Article's _base_manager class # dynamically. This is a bit of a hack, but it seems hard to # test this properly otherwise. Article's manager, because # proxy models use their parent model's _base_manager. orig_class = Article._base_manager.__class__ class FakeQuerySet(QuerySet): # Make sure the _update method below is in fact called. called = False def _update(self, *args, **kwargs): FakeQuerySet.called = True super(FakeQuerySet, self)._update(*args, **kwargs) return 0 class FakeManager(orig_class): def get_queryset(self): return FakeQuerySet(self.model) try: Article._base_manager.__class__ = FakeManager asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now()) with self.assertNumQueries(3): asos.save() self.assertTrue(FakeQuerySet.called) # This is not wanted behavior, but this is how Django has always # behaved for databases that do not return correct information # about matched rows for UPDATE. with self.assertRaises(DatabaseError): asos.save(force_update=True) with self.assertRaises(DatabaseError): asos.save(update_fields=['pub_date']) finally: Article._base_manager.__class__ = orig_class class ModelRefreshTests(TestCase): def _truncate_ms(self, val): # MySQL < 5.6.4 removes microseconds from the datetimes which can cause # problems when comparing the original value to that loaded from DB return val - timedelta(microseconds=val.microsecond) def test_refresh(self): a = Article.objects.create(pub_date=self._truncate_ms(datetime.now())) Article.objects.create(pub_date=self._truncate_ms(datetime.now())) Article.objects.filter(pk=a.pk).update(headline='new headline') with self.assertNumQueries(1): a.refresh_from_db() self.assertEqual(a.headline, 'new headline') orig_pub_date = a.pub_date new_pub_date = a.pub_date + timedelta(10) Article.objects.update(headline='new headline 2', pub_date=new_pub_date) with self.assertNumQueries(1): a.refresh_from_db(fields=['headline']) self.assertEqual(a.headline, 'new headline 2') 
self.assertEqual(a.pub_date, orig_pub_date) with self.assertNumQueries(1): a.refresh_from_db() self.assertEqual(a.pub_date, new_pub_date) def test_refresh_fk(self): s1 = SelfRef.objects.create() s2 = SelfRef.objects.create() s3 = SelfRef.objects.create(selfref=s1) s3_copy = SelfRef.objects.get(pk=s3.pk) s3_copy.selfref.touched = True s3.selfref = s2 s3.save() with self.assertNumQueries(1): s3_copy.refresh_from_db() with self.assertNumQueries(1): # The old related instance was thrown away (the selfref_id has # changed). It needs to be reloaded on access, so one query # executed. self.assertFalse(hasattr(s3_copy.selfref, 'touched')) self.assertEqual(s3_copy.selfref, s2) def test_refresh_null_fk(self): s1 = SelfRef.objects.create() s2 = SelfRef.objects.create(selfref=s1) s2.selfref = None s2.refresh_from_db() self.assertEqual(s2.selfref, s1) def test_refresh_unsaved(self): pub_date = self._truncate_ms(datetime.now()) a = Article.objects.create(pub_date=pub_date) a2 = Article(id=a.pk) with self.assertNumQueries(1): a2.refresh_from_db() self.assertEqual(a2.pub_date, pub_date) self.assertEqual(a2._state.db, "default") def test_refresh_no_fields(self): a = Article.objects.create(pub_date=self._truncate_ms(datetime.now())) with self.assertNumQueries(0): a.refresh_from_db(fields=[]) class TestRelatedObjectDeprecation(SimpleTestCase): def test_field_related_deprecation(self): field = SelfRef._meta.get_field('selfref') with warnings.catch_warnings(record=True) as warns: warnings.simplefilter('always') self.assertIsInstance(field.related, ForeignObjectRel) self.assertEqual(len(warns), 1) self.assertEqual( str(warns.pop().message), 'Usage of field.related has been deprecated. Use field.remote_field instead.' )
bsd-3-clause
looker/sentry
tests/sentry/web/frontend/test_organization_integration_setup.py
2
1461
from __future__ import absolute_import import pytest from sentry.testutils import PermissionTestCase, TestCase class OrganizationIntegrationSetupPermissionTest(PermissionTestCase): def setUp(self): super(OrganizationIntegrationSetupPermissionTest, self).setUp() self.path = '/organizations/{}/integrations/example/setup/'.format( self.organization.slug, ) # this currently redirects the user @pytest.mark.xfail def test_manager_can_load(self): self.assert_role_can_access(self.path, 'manager') # this currently redirects the user @pytest.mark.xfail def test_owner_can_load(self): self.assert_owner_can_access(self.path) class OrganizationIntegrationSetupTest(TestCase): def setUp(self): super(OrganizationIntegrationSetupTest, self).setUp() self.organization = self.create_organization(name='foo', owner=self.user) self.login_as(self.user) self.path = '/organizations/{}/integrations/example/setup/'.format( self.organization.slug, ) def test_basic_flow(self): resp = self.client.get(self.path) assert resp.status_code == 200 resp = self.client.post(self.path, data={'name': 'morty'}) assert resp.status_code == 200 # Check that we're binding the state back to the opening window # through the dialog's window.postMessage. assert 'morty' in resp.content
bsd-3-clause
wolfram74/numerical_methods_iserles_notes
venv/lib/python2.7/site-packages/sympy/plotting/pygletplot/tests/test_plotting.py
109
2653
from sympy.external.importtools import import_module disabled = False # if pyglet.gl fails to import, e.g. opengl is missing, we disable the tests pyglet_gl = import_module("pyglet.gl", catch=(OSError,)) pyglet_window = import_module("pyglet.window", catch=(OSError,)) if not pyglet_gl or not pyglet_window: disabled = True from sympy import symbols, sin, cos x, y, z = symbols('x, y, z') def test_import(): from sympy.plotting.pygletplot import PygletPlot def test_plot_2d(): from sympy.plotting.pygletplot import PygletPlot p = PygletPlot(x, [x, -5, 5, 4], visible=False) p.wait_for_calculations() def test_plot_2d_discontinuous(): from sympy.plotting.pygletplot import PygletPlot p = PygletPlot(1/x, [x, -1, 1, 2], visible=False) p.wait_for_calculations() def test_plot_3d(): from sympy.plotting.pygletplot import PygletPlot p = PygletPlot(x*y, [x, -5, 5, 5], [y, -5, 5, 5], visible=False) p.wait_for_calculations() def test_plot_3d_discontinuous(): from sympy.plotting.pygletplot import PygletPlot p = PygletPlot(1/x, [x, -3, 3, 6], [y, -1, 1, 1], visible=False) p.wait_for_calculations() def test_plot_2d_polar(): from sympy.plotting.pygletplot import PygletPlot p = PygletPlot(1/x, [x, -1, 1, 4], 'mode=polar', visible=False) p.wait_for_calculations() def test_plot_3d_cylinder(): from sympy.plotting.pygletplot import PygletPlot p = PygletPlot( 1/y, [x, 0, 6.282, 4], [y, -1, 1, 4], 'mode=polar;style=solid', visible=False) p.wait_for_calculations() def test_plot_3d_spherical(): from sympy.plotting.pygletplot import PygletPlot p = PygletPlot( 1, [x, 0, 6.282, 4], [y, 0, 3.141, 4], 'mode=spherical;style=wireframe', visible=False) p.wait_for_calculations() def test_plot_2d_parametric(): from sympy.plotting.pygletplot import PygletPlot p = PygletPlot(sin(x), cos(x), [x, 0, 6.282, 4], visible=False) p.wait_for_calculations() def test_plot_3d_parametric(): from sympy.plotting.pygletplot import PygletPlot p = PygletPlot(sin(x), cos(x), x/5.0, [x, 0, 6.282, 4], visible=False) 
p.wait_for_calculations() def _test_plot_log(): from sympy.plotting.pygletplot import PygletPlot p = PygletPlot(log(x), [x, 0, 6.282, 4], 'mode=polar', visible=False) p.wait_for_calculations() def test_plot_integral(): # Make sure it doesn't treat x as an independent variable from sympy.plotting.pygletplot import PygletPlot from sympy import Integral p = PygletPlot(Integral(z*x, (x, 1, z), (z, 1, y)), visible=False) p.wait_for_calculations()
mit
openhumanoids/oh-distro
software/models/model_transformation/mitUrdfUtils.py
1
8638
import os import copy from lxml import etree from subprocess import call from urlparse import urlparse xacro_path = os.path.join(os.getenv("DRC_BASE"), "software", "models", "model_transformation", "xacro.py") chull_script_path = os.path.join(os.getenv("DRC_BASE"), "software", "models", "model_transformation", "chull.mlx") def convertMeshTo(inFile, newExtension): outFile = os.path.splitext(inFile)[0] + newExtension convertMesh(inFile, outFile) def convertMesh(inFile, outFile): if not os.path.exists(outFile): call(["meshlabserver", "-i", inFile, "-o", outFile]) def createConvexHullMesh(inFile, outFile=None): if not outFile: inFileBase, inFileExtension = os.path.splitext(inFile) outFile = inFileBase + "_chull" + inFileExtension if not os.path.exists(outFile): call(["meshlabserver", "-i", inFile, "-s", chull_script_path, "-o", outFile]) def removeCollisions(urdf, linkNames): for name in linkNames: for element in urdf.findall("//link[@name='%s']/collision" % name): element.getparent().remove(element) def removeAllCollisions(urdf): for element in urdf.findall("//collision"): element.getparent().remove(element) return urdf def addCollisionsFromVisuals(urdf): for visual in urdf.findall("//visual"): collision = copy.deepcopy(visual) collision.tag = "collision" visual.getparent().append(collision) def addVisual(link): return etree.SubElement(link, 'visual') def addCollision(link): return etree.SubElement(link, 'collision') def addGeometry(element): return etree.SubElement(element, 'geometry') def addOrigin(element, xyz=[0.0, 0.0, 0.0], rpy=[0.0, 0.0, 0.0]): origin = etree.SubElement(element, 'origin') origin.set('xyz', '%8.5f %8.5f %8.5f' % tuple(xyz)) origin.set('rpy', '%8.5f %8.5f %8.5f' % tuple(rpy)) return origin def addBox(geometry, size=[1.0, 1.0, 1.0]): box = etree.SubElement(geometry, 'box') box.set('size', '%8.5f %8.5f %8.5f' % tuple(size)) return box def addFrame(urdf, frameName, linkName, xyz, rpy): frame = etree.SubElement(urdf.getroot(), "frame") 
frame.set("name", frameName) frame.set("link", linkName) frame.set("xyz", xyz) frame.set("rpy", rpy) return urdf def addContactPoint(urdf, linkName, xyz, group): link = urdf.find("link[@name='%s']" % linkName) collision_point = etree.SubElement(link, "collision") collision_point.set("group", group) origin = etree.SubElement(collision_point, "origin") origin.set("rpy", "0 0 0") origin.set("xyz", xyz) geometry = etree.SubElement(collision_point, "geometry") sphere = etree.SubElement(geometry, "sphere") sphere.set("radius", "0.0") visual_point = etree.SubElement(link, "visual") visual_point.set("group", group) origin = etree.SubElement(visual_point, "origin") origin.set("rpy", "0 0 0") origin.set("xyz", xyz) geometry = etree.SubElement(visual_point, "geometry") sphere = etree.SubElement(geometry, "sphere") sphere.set("radius", "0.01") return urdf def xacro(inFile, outFile, includes_only=False, recursive_includes=False, verbose=False): args = ["python", xacro_path, inFile, "-o", outFile] if includes_only: args.append("--includes") if recursive_includes: args.append("--recursive-includes") if verbose: print("Executing: " + " ".join(args)) call(args) def replacePackageWithPathInMeshPaths(urdf, newPath): for element in urdf.findall("//*[@filename]"): filename = element.get("filename") parsed_filename = urlparse(filename) if parsed_filename.scheme == "package": path = parsed_filename.netloc filename = os.path.join(newPath, path + parsed_filename.path) else: filename = parsed_filename.path element.set("filename", filename) def replaceMeshPaths(urdf, meshDirectory): for mesh in urdf.findall(".//mesh"): filename = mesh.get("filename") newFilename = os.path.join(meshDirectory, os.path.basename(filename)) mesh.set("filename", newFilename) return urdf def useObjMeshes(urdf): for mesh in urdf.findall(".//mesh"): filename = mesh.get("filename") objFilename = os.path.splitext(filename)[0] + ".obj" mesh.set("filename", objFilename) return urdf def useConvexHullMeshes(urdf): for mesh 
in urdf.findall(".//collision/geometry/mesh"): filename = mesh.get("filename") filename_base, filename_ext = os.path.splitext(filename) convexHullFilename = filename_base + "_chull" + filename_ext mesh.set("filename", convexHullFilename) return urdf def renameJoints(urdf, jointNameMap): for oldName, newName in jointNameMap.iteritems(): for element in urdf.xpath("//*[contains(@name,'%s')]" % oldName): element.set("name", element.get("name").replace(oldName, newName)) return urdf def weldJoint(urdf, jointName): joint = urdf.xpath("//joint[@name = '%s']" % jointName)[0] joint.set("type", "fixed") def weldAllJoints(urdf): for joint in urdf.findall("//joint"): joint.set("type", "fixed") def addCollisionFilterGroup(urdf, name, members, groupsToIgnore): cfg = etree.SubElement(urdf.getroot(), "collision_filter_group") cfg.set('name',name) for link in members: m = etree.SubElement(cfg, 'member') m.set('link', link) for group in groupsToIgnore: g = etree.SubElement(cfg, 'ignored_collision_filter_group') g.set('collision_filter_group', group) def copyLinkProperties(urdf, sourceLinkName, destinationLinkName): sourceLink = urdf.find("link[@name='%s']" % sourceLinkName) destinationLink = urdf.find("link[@name='%s']" % destinationLinkName) return copyElementProperties(urdf, sourceLink, destinationLink) def copyJointProperties(urdf, sourceJointName, destinationJointName, additionalExceptions = []): sourceJoint = urdf.find("joint[@name='%s']" % sourceJointName) destinationJoint = urdf.find("joint[@name='%s']" % destinationJointName) return copyElementProperties(urdf, sourceJoint, destinationJoint, ['parent', 'child'] + additionalExceptions) def copyElementProperties(urdf, sourceElement, destinationElement, exceptionTagNames = []): ''' doing it this way to try to preserve the order of the elements for easy text comparison ''' sourceChildrenToAppend = copy.copy(list(sourceElement)) for destinationChild in destinationElement: if destinationChild.tag not in exceptionTagNames: 
sourceChildrenWithThisTag = sourceElement.findall(destinationChild.tag) destinationChildrenWithThisTag = destinationElement.findall(destinationChild.tag) if len(sourceChildrenWithThisTag) == 1 and len(destinationChildrenWithThisTag) == 1: # replace and remove from sourceChildrenToAppend sourceChild = sourceChildrenWithThisTag[0] destinationElement.replace(destinationChild, copy.deepcopy(sourceChild)) sourceChildrenToAppend.remove(sourceChild) else: # remove and leave in sourceChildrenToAppend destinationElement.remove(destinationChild) for sourceChild in sourceChildrenToAppend: if sourceChild.tag not in exceptionTagNames: destinationElement.append(copy.deepcopy(sourceChild)) return urdf def invertJointAxis(urdf, jointName): axis = urdf.find("joint[@name='%s']/axis" % jointName) xyz = axis.get('xyz').split(' ') axis.set('xyz', ' '.join(map(lambda x : str(-float(x)), xyz))) return urdf def setJointOriginRPY(urdf, jointName, rpy): origin = urdf.find("joint[@name='%s']/origin" % jointName) origin.set('rpy', ' '.join(map(lambda x : str(x), rpy))) return urdf def setJointLimits(urdf, jointName, lower, upper): origin = urdf.find("joint[@name='%s']/limit" % jointName) origin.set('lower', str(lower)) origin.set('upper', str(upper)) return urdf def setLinkVisualRPY(urdf, linkName, rpy): visual = urdf.find("link[@name='%s']/visual" % linkName) origin = visual.get('origin') if origin is None: origin = etree.SubElement(visual, 'origin', {'rpy': "0 0 0", 'xyz': "0 0 0"}) origin.set('rpy', ' '.join(map(lambda x : str(x), rpy))) return urdf
bsd-3-clause
FRC-Team-3140/north-american-happiness
lib/python2.7/site-packages/sqlalchemy/event/attr.py
1
12632
# event/attr.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Attribute implementation for _Dispatch classes. The various listener targets for a particular event class are represented as attributes, which refer to collections of listeners to be fired off. These collections can exist at the class level as well as at the instance level. An event is fired off using code like this:: some_object.dispatch.first_connect(arg1, arg2) Above, ``some_object.dispatch`` would be an instance of ``_Dispatch`` and ``first_connect`` is typically an instance of ``_ListenerCollection`` if event listeners are present, or ``_EmptyListener`` if none are present. The attribute mechanics here spend effort trying to ensure listener functions are available with a minimum of function call overhead, that unnecessary objects aren't created (i.e. many empty per-instance listener collections), as well as that everything is garbage collectable when owning references are lost. Other features such as "propagation" of listener functions across many ``_Dispatch`` instances, "joining" of multiple ``_Dispatch`` instances, as well as support for subclass propagation (e.g. events assigned to ``Pool`` vs. ``QueuePool``) are all implemented here. """ from __future__ import absolute_import, with_statement from .. import util from ..util import threading from . import registry from . 
import legacy from itertools import chain import weakref class RefCollection(object): @util.memoized_property def ref(self): return weakref.ref(self, registry._collection_gced) class _DispatchDescriptor(RefCollection): """Class-level attributes on :class:`._Dispatch` classes.""" def __init__(self, parent_dispatch_cls, fn): self.__name__ = fn.__name__ argspec = util.inspect_getargspec(fn) self.arg_names = argspec.args[1:] self.has_kw = bool(argspec.keywords) self.legacy_signatures = list(reversed( sorted( getattr(fn, '_legacy_signatures', []), key=lambda s: s[0] ) )) self.__doc__ = fn.__doc__ = legacy._augment_fn_docs( self, parent_dispatch_cls, fn) self._clslevel = weakref.WeakKeyDictionary() self._empty_listeners = weakref.WeakKeyDictionary() def _adjust_fn_spec(self, fn, named): if named: fn = self._wrap_fn_for_kw(fn) if self.legacy_signatures: try: argspec = util.get_callable_argspec(fn, no_self=True) except ValueError: pass else: fn = legacy._wrap_fn_for_legacy(self, fn, argspec) return fn def _wrap_fn_for_kw(self, fn): def wrap_kw(*args, **kw): argdict = dict(zip(self.arg_names, args)) argdict.update(kw) return fn(**argdict) return wrap_kw def insert(self, event_key, propagate): target = event_key.dispatch_target assert isinstance(target, type), \ "Class-level Event targets must be classes." stack = [target] while stack: cls = stack.pop(0) stack.extend(cls.__subclasses__()) if cls is not target and cls not in self._clslevel: self.update_subclass(cls) else: if cls not in self._clslevel: self._clslevel[cls] = [] self._clslevel[cls].insert(0, event_key._listen_fn) registry._stored_in_collection(event_key, self) def append(self, event_key, propagate): target = event_key.dispatch_target assert isinstance(target, type), \ "Class-level Event targets must be classes." 
stack = [target] while stack: cls = stack.pop(0) stack.extend(cls.__subclasses__()) if cls is not target and cls not in self._clslevel: self.update_subclass(cls) else: if cls not in self._clslevel: self._clslevel[cls] = [] self._clslevel[cls].append(event_key._listen_fn) registry._stored_in_collection(event_key, self) def update_subclass(self, target): if target not in self._clslevel: self._clslevel[target] = [] clslevel = self._clslevel[target] for cls in target.__mro__[1:]: if cls in self._clslevel: clslevel.extend([ fn for fn in self._clslevel[cls] if fn not in clslevel ]) def remove(self, event_key): target = event_key.dispatch_target stack = [target] while stack: cls = stack.pop(0) stack.extend(cls.__subclasses__()) if cls in self._clslevel: self._clslevel[cls].remove(event_key.fn) registry._removed_from_collection(event_key, self) def clear(self): """Clear all class level listeners""" to_clear = set() for dispatcher in self._clslevel.values(): to_clear.update(dispatcher) dispatcher[:] = [] registry._clear(self, to_clear) def for_modify(self, obj): """Return an event collection which can be modified. For _DispatchDescriptor at the class level of a dispatcher, this returns self. """ return self def __get__(self, obj, cls): if obj is None: return self elif obj._parent_cls in self._empty_listeners: ret = self._empty_listeners[obj._parent_cls] else: self._empty_listeners[obj._parent_cls] = ret = \ _EmptyListener(self, obj._parent_cls) # assigning it to __dict__ means # memoized for fast re-access. but more memory. obj.__dict__[self.__name__] = ret return ret class _HasParentDispatchDescriptor(object): def _adjust_fn_spec(self, fn, named): return self.parent._adjust_fn_spec(fn, named) class _EmptyListener(_HasParentDispatchDescriptor): """Serves as a class-level interface to the events served by a _DispatchDescriptor, when there are no instance-level events present. Is replaced by _ListenerCollection when instance-level events are added. 
""" def __init__(self, parent, target_cls): if target_cls not in parent._clslevel: parent.update_subclass(target_cls) self.parent = parent # _DispatchDescriptor self.parent_listeners = parent._clslevel[target_cls] self.name = parent.__name__ self.propagate = frozenset() self.listeners = () def for_modify(self, obj): """Return an event collection which can be modified. For _EmptyListener at the instance level of a dispatcher, this generates a new _ListenerCollection, applies it to the instance, and returns it. """ result = _ListenerCollection(self.parent, obj._parent_cls) if obj.__dict__[self.name] is self: obj.__dict__[self.name] = result return result def _needs_modify(self, *args, **kw): raise NotImplementedError("need to call for_modify()") exec_once = insert = append = remove = clear = _needs_modify def __call__(self, *args, **kw): """Execute this event.""" for fn in self.parent_listeners: fn(*args, **kw) def __len__(self): return len(self.parent_listeners) def __iter__(self): return iter(self.parent_listeners) def __bool__(self): return bool(self.parent_listeners) __nonzero__ = __bool__ class _CompoundListener(_HasParentDispatchDescriptor): _exec_once = False @util.memoized_property def _exec_once_mutex(self): return threading.Lock() def exec_once(self, *args, **kw): """Execute this event, but only if it has not been executed already for this collection.""" if not self._exec_once: with self._exec_once_mutex: if not self._exec_once: try: self(*args, **kw) finally: self._exec_once = True def __call__(self, *args, **kw): """Execute this event.""" for fn in self.parent_listeners: fn(*args, **kw) for fn in self.listeners: fn(*args, **kw) def __len__(self): return len(self.parent_listeners) + len(self.listeners) def __iter__(self): return chain(self.parent_listeners, self.listeners) def __bool__(self): return bool(self.listeners or self.parent_listeners) __nonzero__ = __bool__ class _ListenerCollection(RefCollection, _CompoundListener): """Instance-level attributes 
on instances of :class:`._Dispatch`. Represents a collection of listeners. As of 0.7.9, _ListenerCollection is only first created via the _EmptyListener.for_modify() method. """ def __init__(self, parent, target_cls): if target_cls not in parent._clslevel: parent.update_subclass(target_cls) self.parent_listeners = parent._clslevel[target_cls] self.parent = parent self.name = parent.__name__ self.listeners = [] self.propagate = set() def for_modify(self, obj): """Return an event collection which can be modified. For _ListenerCollection at the instance level of a dispatcher, this returns self. """ return self def _update(self, other, only_propagate=True): """Populate from the listeners in another :class:`_Dispatch` object.""" existing_listeners = self.listeners existing_listener_set = set(existing_listeners) self.propagate.update(other.propagate) other_listeners = [l for l in other.listeners if l not in existing_listener_set and not only_propagate or l in self.propagate ] existing_listeners.extend(other_listeners) to_associate = other.propagate.union(other_listeners) registry._stored_in_collection_multi(self, other, to_associate) def insert(self, event_key, propagate): if event_key._listen_fn not in self.listeners: event_key.prepend_to_list(self, self.listeners) if propagate: self.propagate.add(event_key._listen_fn) def append(self, event_key, propagate): if event_key._listen_fn not in self.listeners: event_key.append_to_list(self, self.listeners) if propagate: self.propagate.add(event_key._listen_fn) def remove(self, event_key): self.listeners.remove(event_key._listen_fn) self.propagate.discard(event_key._listen_fn) registry._removed_from_collection(event_key, self) def clear(self): registry._clear(self, self.listeners) self.propagate.clear() self.listeners[:] = [] class _JoinedDispatchDescriptor(object): def __init__(self, name): self.name = name def __get__(self, obj, cls): if obj is None: return self else: obj.__dict__[self.name] = ret = _JoinedListener( 
obj.parent, self.name, getattr(obj.local, self.name) ) return ret class _JoinedListener(_CompoundListener): _exec_once = False def __init__(self, parent, name, local): self.parent = parent self.name = name self.local = local self.parent_listeners = self.local @property def listeners(self): return getattr(self.parent, self.name) def _adjust_fn_spec(self, fn, named): return self.local._adjust_fn_spec(fn, named) def for_modify(self, obj): self.local = self.parent_listeners = self.local.for_modify(obj) return self def insert(self, event_key, propagate): self.local.insert(event_key, propagate) def append(self, event_key, propagate): self.local.append(event_key, propagate) def remove(self, event_key): self.local.remove(event_key) def clear(self): raise NotImplementedError()
mit
steveb/heat
heat/engine/resources/openstack/trove/os_database.py
3
28190
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import six from heat.common import exception from heat.common.i18n import _ from heat.common.i18n import _LI from heat.common.i18n import _LW from heat.engine import attributes from heat.engine import constraints from heat.engine import properties from heat.engine import resource from heat.engine import support LOG = logging.getLogger(__name__) class OSDBInstance(resource.Resource): """OpenStack cloud database instance resource. Trove is Database as a Service for OpenStack. It's designed to run entirely on OpenStack, with the goal of allowing users to quickly and easily utilize the features of a relational or non-relational database without the burden of handling complex administrative tasks. """ support_status = support.SupportStatus(version='2014.1') TROVE_STATUS = ( ERROR, FAILED, ACTIVE, ) = ( 'ERROR', 'FAILED', 'ACTIVE', ) TROVE_STATUS_REASON = { FAILED: _('The database instance was created, but heat failed to set ' 'up the datastore. 
If a database instance is in the FAILED ' 'state, it should be deleted and a new one should be ' 'created.'), ERROR: _('The last operation for the database instance failed due to ' 'an error.'), } BAD_STATUSES = (ERROR, FAILED) PROPERTIES = ( NAME, FLAVOR, SIZE, DATABASES, USERS, AVAILABILITY_ZONE, RESTORE_POINT, DATASTORE_TYPE, DATASTORE_VERSION, NICS, REPLICA_OF, REPLICA_COUNT, ) = ( 'name', 'flavor', 'size', 'databases', 'users', 'availability_zone', 'restore_point', 'datastore_type', 'datastore_version', 'networks', 'replica_of', 'replica_count' ) _DATABASE_KEYS = ( DATABASE_CHARACTER_SET, DATABASE_COLLATE, DATABASE_NAME, ) = ( 'character_set', 'collate', 'name', ) _USER_KEYS = ( USER_NAME, USER_PASSWORD, USER_HOST, USER_DATABASES, ) = ( 'name', 'password', 'host', 'databases', ) _NICS_KEYS = ( NET, PORT, V4_FIXED_IP ) = ( 'network', 'port', 'fixed_ip' ) ATTRIBUTES = ( HOSTNAME, HREF, ) = ( 'hostname', 'href', ) properties_schema = { NAME: properties.Schema( properties.Schema.STRING, _('Name of the DB instance to create.'), update_allowed=True, constraints=[ constraints.Length(max=255), ] ), FLAVOR: properties.Schema( properties.Schema.STRING, _('Reference to a flavor for creating DB instance.'), required=True, update_allowed=True, constraints=[ constraints.CustomConstraint('trove.flavor') ] ), DATASTORE_TYPE: properties.Schema( properties.Schema.STRING, _("Name of registered datastore type."), constraints=[ constraints.Length(max=255) ] ), DATASTORE_VERSION: properties.Schema( properties.Schema.STRING, _("Name of the registered datastore version. " "It must exist for provided datastore type. " "Defaults to using single active version. 
" "If several active versions exist for provided datastore type, " "explicit value for this parameter must be specified."), constraints=[constraints.Length(max=255)] ), SIZE: properties.Schema( properties.Schema.INTEGER, _('Database volume size in GB.'), required=True, update_allowed=True, constraints=[ constraints.Range(1, 150), ] ), NICS: properties.Schema( properties.Schema.LIST, _("List of network interfaces to create on instance."), default=[], schema=properties.Schema( properties.Schema.MAP, schema={ NET: properties.Schema( properties.Schema.STRING, _('Name or UUID of the network to attach this NIC to. ' 'Either %(port)s or %(net)s must be specified.') % { 'port': PORT, 'net': NET}, constraints=[ constraints.CustomConstraint('neutron.network') ] ), PORT: properties.Schema( properties.Schema.STRING, _('Name or UUID of Neutron port to attach this ' 'NIC to. ' 'Either %(port)s or %(net)s must be specified.') % { 'port': PORT, 'net': NET}, constraints=[ constraints.CustomConstraint('neutron.port') ], ), V4_FIXED_IP: properties.Schema( properties.Schema.STRING, _('Fixed IPv4 address for this NIC.'), constraints=[ constraints.CustomConstraint('ip_addr') ] ), }, ), ), DATABASES: properties.Schema( properties.Schema.LIST, _('List of databases to be created on DB instance creation.'), default=[], update_allowed=True, schema=properties.Schema( properties.Schema.MAP, schema={ DATABASE_CHARACTER_SET: properties.Schema( properties.Schema.STRING, _('Set of symbols and encodings.'), default='utf8' ), DATABASE_COLLATE: properties.Schema( properties.Schema.STRING, _('Set of rules for comparing characters in a ' 'character set.'), default='utf8_general_ci' ), DATABASE_NAME: properties.Schema( properties.Schema.STRING, _('Specifies database names for creating ' 'databases on instance creation.'), required=True, constraints=[ constraints.Length(max=64), constraints.AllowedPattern(r'[a-zA-Z0-9_]+' r'[a-zA-Z0-9_@?#\s]*' r'[a-zA-Z0-9_]+'), ] ), }, ) ), USERS: properties.Schema( 
properties.Schema.LIST, _('List of users to be created on DB instance creation.'), default=[], update_allowed=True, schema=properties.Schema( properties.Schema.MAP, schema={ USER_NAME: properties.Schema( properties.Schema.STRING, _('User name to create a user on instance ' 'creation.'), required=True, update_allowed=True, constraints=[ constraints.Length(max=16), constraints.AllowedPattern(r'[a-zA-Z0-9_]+' r'[a-zA-Z0-9_@?#\s]*' r'[a-zA-Z0-9_]+'), ] ), USER_PASSWORD: properties.Schema( properties.Schema.STRING, _('Password for those users on instance ' 'creation.'), required=True, update_allowed=True, constraints=[ constraints.AllowedPattern(r'[a-zA-Z0-9_]+' r'[a-zA-Z0-9_@?#\s]*' r'[a-zA-Z0-9_]+'), ] ), USER_HOST: properties.Schema( properties.Schema.STRING, _('The host from which a user is allowed to ' 'connect to the database.'), default='%', update_allowed=True ), USER_DATABASES: properties.Schema( properties.Schema.LIST, _('Names of databases that those users can ' 'access on instance creation.'), schema=properties.Schema( properties.Schema.STRING, ), required=True, update_allowed=True, constraints=[ constraints.Length(min=1), ] ), }, ) ), AVAILABILITY_ZONE: properties.Schema( properties.Schema.STRING, _('Name of the availability zone for DB instance.') ), RESTORE_POINT: properties.Schema( properties.Schema.STRING, _('DB instance restore point.') ), REPLICA_OF: properties.Schema( properties.Schema.STRING, _('Identifier of the source instance to replicate.'), support_status=support.SupportStatus(version='5.0.0') ), REPLICA_COUNT: properties.Schema( properties.Schema.INTEGER, _('The number of replicas to be created.'), support_status=support.SupportStatus(version='5.0.0') ), } attributes_schema = { HOSTNAME: attributes.Schema( _("Hostname of the instance."), type=attributes.Schema.STRING ), HREF: attributes.Schema( _("Api endpoint reference of the instance."), type=attributes.Schema.STRING ), } default_client_name = 'trove' entity = 'instances' def __init__(self, 
                 name, json_snippet, stack):
        super(OSDBInstance, self).__init__(name, json_snippet, stack)
        # Lazily-populated caches; see href() and the dbinstance property.
        self._href = None
        self._dbinstance = None

    @property
    def dbinstance(self):
        """Get the trove dbinstance.

        Cached after the first successful fetch; returns None until the
        resource has a physical id.
        """
        if not self._dbinstance and self.resource_id:
            self._dbinstance = self.client().instances.get(self.resource_id)
        return self._dbinstance

    def _dbinstance_name(self):
        # Explicit NAME property wins; otherwise fall back to the
        # Heat-generated physical resource name.
        name = self.properties[self.NAME]
        if name:
            return name

        return self.physical_resource_name()

    def handle_create(self):
        """Create cloud database instance."""
        self.flavor = self.client_plugin().find_flavor_by_name_or_id(
            self.properties[self.FLAVOR])
        self.volume = {'size': self.properties[self.SIZE]}
        self.databases = self.properties[self.DATABASES]
        self.users = self.properties[self.USERS]
        restore_point = self.properties[self.RESTORE_POINT]
        if restore_point:
            restore_point = {"backupRef": restore_point}
        zone = self.properties[self.AVAILABILITY_ZONE]
        self.datastore_type = self.properties[self.DATASTORE_TYPE]
        self.datastore_version = self.properties[self.DATASTORE_VERSION]
        replica_of = self.properties[self.REPLICA_OF]
        replica_count = self.properties[self.REPLICA_COUNT]

        # convert user databases to format required for troveclient.
        # that is, list of database dictionaries
        for user in self.users:
            dbs = [{'name': db} for db in user.get(self.USER_DATABASES, [])]
            user[self.USER_DATABASES] = dbs

        # convert networks to format required by troveclient
        nics = []
        for nic in self.properties[self.NICS]:
            nic_dict = {}
            net = nic.get(self.NET)
            if net:
                # Resolve the network name/id against whichever backend
                # (neutron or nova-network) this cloud uses.
                if self.is_using_neutron():
                    net_id = self.client_plugin(
                        'neutron').find_resourceid_by_name_or_id('network',
                                                                 net)
                else:
                    net_id = (self.client_plugin(
                        'nova').get_nova_network_id(net))
                nic_dict['net-id'] = net_id
            port = nic.get(self.PORT)
            if port:
                neutron = self.client_plugin('neutron')
                nic_dict['port-id'] = neutron.find_resourceid_by_name_or_id(
                    'port', port)
            ip = nic.get(self.V4_FIXED_IP)
            if ip:
                nic_dict['v4-fixed-ip'] = ip
            nics.append(nic_dict)

        # create db instance
        instance = self.client().instances.create(
            self._dbinstance_name(),
            self.flavor,
            volume=self.volume,
            databases=self.databases,
            users=self.users,
            restorePoint=restore_point,
            availability_zone=zone,
            datastore=self.datastore_type,
            datastore_version=self.datastore_version,
            nics=nics,
            replica_of=replica_of,
            replica_count=replica_count)
        self.resource_id_set(instance.id)

        return instance.id

    def _refresh_instance(self, instance_id):
        # Fetch the instance, tolerating API rate limiting: an OverLimit
        # response yields None so pollers can simply retry later.
        try:
            instance = self.client().instances.get(instance_id)
            return instance
        except Exception as exc:
            if self.client_plugin().is_over_limit(exc):
                LOG.warning(_LW("Stack %(name)s (%(id)s) received an "
                                "OverLimit response during instance.get():"
                                " %(exception)s"),
                            {'name': self.stack.name,
                             'id': self.stack.id,
                             'exception': exc})
                return None
            else:
                raise

    def check_create_complete(self, instance_id):
        """Check if cloud DB instance creation is complete."""
        instance = self._refresh_instance(instance_id)  # refresh attributes
        if instance is None:
            # Rate-limited; poll again on the next cycle.
            return False
        if instance.status in self.BAD_STATUSES:
            raise exception.ResourceInError(
                resource_status=instance.status,
                status_reason=self.TROVE_STATUS_REASON.get(instance.status,
                                                           _("Unknown")))

        if instance.status != self.ACTIVE:
            return False
        LOG.info(_LI("Database instance %(database)s created (flavor:%("
                     "flavor)s,volume:%(volume)s, datastore:%("
                     "datastore_type)s, datastore_version:%("
                     "datastore_version)s)"),
                 {'database': self._dbinstance_name(),
                  'flavor': self.flavor,
                  'volume': self.volume,
                  'datastore_type': self.datastore_type,
                  'datastore_version': self.datastore_version})
        return True

    def handle_check(self):
        # Health check for `heat action-check`: the instance must be ACTIVE.
        instance = self.client().instances.get(self.resource_id)
        status = instance.status
        checks = [
            {'attr': 'status', 'expected': self.ACTIVE, 'current': status},
        ]
        self._verify_check_conditions(checks)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Collect the property changes to apply into an `updates` dict.

        DATABASES/USERS entries are annotated in place with an 'ACTION'
        key (CREATE/DELETE) so check_update_complete() knows what to do;
        the actual API calls happen there, not here.
        """
        updates = {}
        if prop_diff:
            instance = self.client().instances.get(self.resource_id)
            if self.NAME in prop_diff:
                updates.update({self.NAME: prop_diff[self.NAME]})
            if self.FLAVOR in prop_diff:
                flvid = prop_diff[self.FLAVOR]
                flv = self.client_plugin().get_flavor_id(flvid)
                updates.update({self.FLAVOR: flv})
            if self.SIZE in prop_diff:
                updates.update({self.SIZE: prop_diff[self.SIZE]})
            if self.DATABASES in prop_diff:
                current = [d.name
                           for d in self.client().databases.list(instance)]
                desired = [d[self.DATABASE_NAME]
                           for d in prop_diff[self.DATABASES]]
                for db in prop_diff[self.DATABASES]:
                    dbname = db[self.DATABASE_NAME]
                    if dbname not in current:
                        db['ACTION'] = self.CREATE
                for dbname in current:
                    if dbname not in desired:
                        # Databases present on the instance but absent from
                        # the template are scheduled for deletion.
                        deleted = {self.DATABASE_NAME: dbname,
                                   'ACTION': self.DELETE}
                        prop_diff[self.DATABASES].append(deleted)
                updates.update({self.DATABASES: prop_diff[self.DATABASES]})
            if self.USERS in prop_diff:
                current = [u.name for u in self.client().users.list(instance)]
                desired = [u[self.USER_NAME] for u in prop_diff[self.USERS]]
                for usr in prop_diff[self.USERS]:
                    if usr[self.USER_NAME] not in current:
                        usr['ACTION'] = self.CREATE
                for usr in current:
                    if usr not in desired:
                        prop_diff[self.USERS].append({self.USER_NAME: usr,
                                                      'ACTION': self.DELETE})
                updates.update({self.USERS: prop_diff[self.USERS]})
        return updates

    def check_update_complete(self, updates):
        """Apply pending updates; return True only when all are done.

        Each _update_* helper returns False while its change is still in
        flight, so the short-circuiting `and` chain makes Heat poll again.
        """
        instance = self.client().instances.get(self.resource_id)
        if instance.status in self.BAD_STATUSES:
            raise exception.ResourceInError(
                resource_status=instance.status,
                status_reason=self.TROVE_STATUS_REASON.get(instance.status,
                                                           _("Unknown")))
        if updates:
            if instance.status != self.ACTIVE:
                dmsg = ("Instance is in status %(now)s. Waiting on status"
                        " %(stat)s")
                LOG.debug(dmsg % {"now": instance.status,
                                  "stat": self.ACTIVE})
                return False
            try:
                return (
                    self._update_name(instance, updates.get(self.NAME)) and
                    self._update_flavor(instance, updates.get(self.FLAVOR)) and
                    self._update_size(instance, updates.get(self.SIZE)) and
                    self._update_databases(instance,
                                           updates.get(self.DATABASES)) and
                    self._update_users(instance, updates.get(self.USERS))
                )
            except Exception as exc:
                if self.client_plugin().is_client_exception(exc):
                    # the instance could have updated between the time
                    # we retrieve it and try to update it so check again
                    if self.client_plugin().is_over_limit(exc):
                        LOG.debug("API rate limit: %(ex)s. Retrying."
                                  % {'ex': six.text_type(exc)})
                        return False
                    if "No change was requested" in six.text_type(exc):
                        LOG.warning(_LW("Unexpected instance state change "
                                        "during update. Retrying."))
                        return False
                raise exc
        return True

    def _update_name(self, instance, name):
        # Returns False (not done) right after issuing the rename so the
        # next poll can observe the result.
        if name and instance.name != name:
            self.client().instances.edit(instance, name=name)
            return False
        return True

    def _update_flavor(self, instance, new_flavor):
        # Kick off a flavor resize if the requested flavor differs.
        if new_flavor:
            current_flav = six.text_type(instance.flavor['id'])
            new_flav = six.text_type(new_flavor)
            if new_flav != current_flav:
                dmsg = "Resizing instance flavor from %(old)s to %(new)s"
                LOG.debug(dmsg % {"old": current_flav, "new": new_flav})
                self.client().instances.resize_instance(instance, new_flavor)
                return False
        return True

    def _update_size(self, instance, new_size):
        # Kick off a volume resize if the requested size differs.
        if new_size and instance.volume['size'] != new_size:
            dmsg = "Resizing instance storage from %(old)s to %(new)s"
            LOG.debug(dmsg % {"old": instance.volume['size'],
                              "new": new_size})
            self.client().instances.resize_volume(instance, new_size)
            return False
        return True

    def _update_databases(self, instance, databases):
        # Execute the CREATE/DELETE actions recorded by handle_update().
        if databases:
            for db in databases:
                if db.get("ACTION") == self.CREATE:
                    db.pop("ACTION", None)
                    dmsg = "Adding new database %(db)s to instance"
                    LOG.debug(dmsg % {"db": db})
                    self.client().databases.create(instance, [db])
                elif db.get("ACTION") == self.DELETE:
                    dmsg = ("Deleting existing database %(db)s from "
                            "instance")
                    LOG.debug(dmsg % {"db": db['name']})
                    self.client().databases.delete(instance, db['name'])
        return True

    def _update_users(self, instance, users):
        # Create/delete users per the ACTION markers; for users that stay,
        # push host/password changes and reconcile database grants.
        if users:
            for usr in users:
                dbs = [{'name': db} for db in usr.get(self.USER_DATABASES,
                                                      [])]
                usr[self.USER_DATABASES] = dbs
                if usr.get("ACTION") == self.CREATE:
                    usr.pop("ACTION", None)
                    dmsg = "Adding new user %(u)s to instance"
                    LOG.debug(dmsg % {"u": usr})
                    self.client().users.create(instance, [usr])
                elif usr.get("ACTION") == self.DELETE:
                    dmsg = ("Deleting existing user %(u)s from "
                            "instance")
                    LOG.debug(dmsg % {"u": usr['name']})
                    self.client().users.delete(instance, usr['name'])
                else:
                    newattrs = {}
                    if usr.get(self.USER_HOST):
                        newattrs[self.USER_HOST] = usr[self.USER_HOST]
                    if usr.get(self.USER_PASSWORD):
                        newattrs[self.USER_PASSWORD] = usr[self.USER_PASSWORD]
                    if newattrs:
                        self.client().users.update_attributes(
                            instance,
                            usr['name'], newuserattr=newattrs,
                            hostname=instance.hostname)
                    current = self.client().users.get(instance,
                                                      usr[self.USER_NAME])
                    dbs = [db['name'] for db in current.databases]
                    desired = [db['name'] for db in
                               usr.get(self.USER_DATABASES, [])]
                    grants = [db for db in desired if db not in dbs]
                    revokes = [db for db in dbs if db not in desired]
                    if grants:
                        self.client().users.grant(instance,
                                                  usr[self.USER_NAME],
                                                  grants)
                    if revokes:
                        self.client().users.revoke(instance,
                                                   usr[self.USER_NAME],
                                                   revokes)
        return True

    def handle_delete(self):
        """Delete a cloud database instance."""
        if not self.resource_id:
            return

        try:
            instance = self.client().instances.get(self.resource_id)
        except Exception as ex:
            # Already gone: treat NotFound as success, re-raise the rest.
            self.client_plugin().ignore_not_found(ex)
        else:
            instance.delete()
            return instance.id

    def check_delete_complete(self, instance_id):
        """Check for completion of cloud DB instance deletion."""
        if not instance_id:
            return True

        try:
            # For some time trove instance may continue to live
            self._refresh_instance(instance_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return True

        return False

    def validate(self):
        """Validate any of the provided params."""
        res = super(OSDBInstance, self).validate()
        if res:
            return res

        datastore_type = self.properties[self.DATASTORE_TYPE]
        datastore_version = self.properties[self.DATASTORE_VERSION]

        self.client_plugin().validate_datastore(
            datastore_type, datastore_version,
            self.DATASTORE_TYPE, self.DATASTORE_VERSION)

        # check validity of user and databases
        users = self.properties[self.USERS]
        if users:
            databases = self.properties[self.DATABASES]
            if not databases:
                msg = _('Databases property is required if users property '
                        'is provided for resource %s.') % self.name
                raise exception.StackValidationFailed(message=msg)

            db_names = set([db[self.DATABASE_NAME] for db in databases])
            for user in users:
                missing_db = [db_name for db_name in
                              user[self.USER_DATABASES]
                              if db_name not in db_names]

                if missing_db:
                    msg = (_('Database %(dbs)s specified for user does '
                             'not exist in databases for resource %(name)s.')
                           % {'dbs': missing_db, 'name': self.name})
                    raise exception.StackValidationFailed(message=msg)

        # check validity of NICS
        is_neutron = self.is_using_neutron()
        nics = self.properties[self.NICS]
        for nic in nics:
            if not is_neutron and nic.get(self.PORT):
                msg = _("Can not use %s property on Nova-network.") % self.PORT
                raise exception.StackValidationFailed(message=msg)

            # Exactly one of NET / PORT must be given.
            if bool(nic.get(self.NET)) == bool(nic.get(self.PORT)):
                msg = _("Either %(net)s or %(port)s must be provided.") % {
                    'net': self.NET, 'port': self.PORT}
                raise exception.StackValidationFailed(message=msg)

    def href(self):
        # Cache the instance's 'self' link from the API response; stays
        # None until the instance exists and exposes links.
        if not self._href and self.dbinstance:
            if not self.dbinstance.links:
                self._href = None
            else:
                for link in self.dbinstance.links:
                    if link['rel'] == 'self':
                        self._href = link[self.HREF]
                        break

        return self._href

    def _resolve_attribute(self, name):
        # Attribute resolution for {get_attr: ...}: hostname and href only.
        if name == self.HOSTNAME:
            return self.dbinstance.hostname
        elif name == self.HREF:
            return self.href()


def resource_mapping():
    # Registers this plugin under its Heat resource type name.
    return {
        'OS::Trove::Instance': OSDBInstance,
    }
apache-2.0
teamotrinidad/plugin.video.evaristo
servers/downupload.py
36
4934
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for external downupload videos
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os

from core import scrapertools
from core import logger
from core import config
from core import unpackerjs

def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Scrape the downupload page and return the playable video URLs.

    Returns a list of [label, url] pairs. Tries several extraction
    strategies in order: direct .mp4 links, obfuscated player_code
    params (hostname and IP variants), and finally unpacked JS.
    """
    logger.info("[downupload.py] get_video_url(page_url='%s')" % page_url)
    page_url = page_url.replace("amp;","")
    data = scrapertools.cache_page(page_url)

    video_urls = []

    # Examples of what we are matching:
    # s1.addVariable('file','http://78.140.181.136:182/d/kka3sx52abiuphevyzfirfaqtihgyq5xlvblnetok2mj4llocdeturoy/video.mp4');
    # http://downupload.com:182/d/k2a3kxf2abiuphevyzfirgajremkk3if57xcpelwboz4hbzjnfsvbit6/video.mp4
    patron = "(http://[\S]+\.mp4)"
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)>0:
        scrapertools.printMatches(matches)
        for match in matches:
            videourl = match
            videourl = videourl.replace('%5C','')
            videourl = urllib.unquote(videourl)
            video_urls.append( [ ".mp4 [Downupload]" , videourl ] )
    else:
        # If it is a download link, look for the file in the player params
        patron = '<div id="player_code">.*?value[\W]name[\W]param[\W]com[\W]http[\W]false[\W](.*?)[\W]divx[\W]previewImage[\W].*?[\W]custommode[\W](.*?)[\W](.*?)[\W](.*?)[\W]src'
        matches = re.compile(patron,re.DOTALL).findall(data)
        scrapertools.printMatches(matches)
        for match in matches:
            # Rebuild host:port/d/token/video.ext from the captured pieces.
            videourl = "http://"+match[0]+".com:"+match[3]+"/d/"+match[2]+"/video."+match[1]
            videourl = videourl.replace('|','.')
            videourl = urllib.unquote(videourl)
            video_urls.append( [ "."+match[1]+" [Downupload]" , videourl ] )

        # Locate links that use an IP address instead of a hostname
        if len(matches)==0:
            patron = '<div id="player_code">.*?value[\W]name[\W]param[\W]http[\W]false[\W](.*?)[\W](.*?)[\W](.*?)[\W](.*?)[\W]divx[\W]previewImage[\W].*?[\W]custommode[\W](.*?)[\W](.*?)[\W](.*?)[\W]src'
            matches = re.compile(patron,re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            for match in matches:
                # Captured octets are in reverse order (match[3]..match[0]).
                videourl = "http://"+match[3]+"."+match[2]+"."+match[1]+"."+match[0]+":"+match[6]+"/d/"+match[5]+"/video."+match[4]
                videourl = videourl.replace('|','')
                videourl = urllib.unquote(videourl)
                video_urls.append( [ "."+match[4]+" [Downupload]" , videourl ] )

        # Another search method: unpack the obfuscated JavaScript
        if len(matches)==0:
            url = unpackerjs.unpackjs(data)
            logger.info("[unpackerjs.py] "+url)
            patron = 'src"value="([^"]+)"'
            matches = re.compile(patron,re.DOTALL).findall(url)
            for match in matches:
                videourl = match
                videourl = videourl.replace('|','')
                videourl = urllib.unquote(videourl)
                video_urls.append( [ "."+videourl.rsplit('.',1)[1]+" [Downupload]" , videourl ] )

    for video_url in video_urls:
        logger.info("[downupload.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls

# Find videos from this server in the given text
def find_videos(data):
    encontrados = set()   # URLs already seen, to skip duplicates
    devuelve = []         # list of [title, url, server] entries returned

    # Downupload http://www.downupload.com/embed-p9oenzlz6xhu.html
    patronvideos = '(downupload.com/embed-.*?\.html)'
    logger.info("[downupload.py] find_videos #"+patronvideos+"#")
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    for match in matches:
        titulo = "[Downupload]"
        url = "http://www."+match
        if url not in encontrados:
            logger.info(" url="+url)
            devuelve.append( [ titulo , url , 'downupload' ] )
            encontrados.add(url)
        else:
            logger.info(" url duplicada="+url)

    # Download links: rewrite plain file links into embed URLs
    if len(matches)==0:
        patronvideos = '(downupload.com/[\w]+)'
        logger.info("[downupload.py] find_videos #"+patronvideos+"#")
        matches = re.compile(patronvideos,re.DOTALL).findall(data)
        for match in matches:
            titulo = "[Downupload]"
            url = match.replace("downupload.com/","http://www.downupload.com/embed-")
            url = url+".html"
            if url not in encontrados:
                logger.info(" url="+url)
                devuelve.append( [ titulo , url , 'downupload' ] )
                encontrados.add(url)
            else:
                logger.info(" url duplicada="+url)

    return devuelve
gpl-2.0
unomena/tunobase
tunobase/social_media/facebook/templatetags/facebook_widgets.py
1
1152
'''
Created on 08 Nov 2013

@author: michael
'''
from copy import copy

from django import template
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse_lazy
from django.utils.http import quote

import facebook

register = template.Library()


@register.inclusion_tag('facebook/inclusion_tags/facebook_login_widget.html',
                        takes_context=True)
def facebook_login_widget(context, perms=None,
                          login_button_text='Login with Facebook'):
    """Render the Facebook login button.

    Builds the OAuth dialog URL for the app, stores the CSRF token in
    the session as the OAuth ``state`` parameter, and exposes
    ``auth_url`` and ``login_button_text`` to the inclusion template.
    ``perms`` is an optional comma-separated string of extra scopes;
    the 'email' scope is always requested.
    """
    # Work on a copy so the caller's template context is not mutated.
    ctx = copy(context)

    # Absolute callback URL on the current site.
    redirect_uri = 'http://%s%s' % (
        Site.objects.get_current().domain,
        reverse_lazy('facebook_login_callback'),
    )

    # Reuse Django's CSRF token as the OAuth state and remember it in
    # the session so the callback view can verify it.
    state = unicode(ctx['csrf_token'])
    ctx['request'].session['facebook_state'] = state

    # 'email' is always requested; extra scopes come comma-separated.
    scopes = ['email']
    if perms:
        scopes += perms.split(',')

    ctx.update({
        'auth_url': quote(facebook.auth_url(
            settings.FACEBOOK_APP_ID,
            redirect_uri,
            scopes,
            state,
        )),
        'login_button_text': login_button_text,
    })
    return ctx
bsd-3-clause
alphafoobar/intellij-community
plugins/hg4idea/testData/bin/hgext/mq.py
90
134782
# mq.py - patch queues for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''manage a stack of patches

This extension lets you work with a stack of patches in a Mercurial
repository. It manages two stacks of patches - all known patches, and
applied patches (subset of known patches).

Known patches are represented as patch files in the .hg/patches
directory. Applied patches are both patch files and changesets.

Common tasks (use :hg:`help command` for more details)::

  create new patch                          qnew
  import existing patch                     qimport

  print patch series                        qseries
  print applied patches                     qapplied

  add known patch to applied stack          qpush
  remove patch from applied stack           qpop
  refresh contents of top applied patch     qrefresh

By default, mq will automatically use git patches when required to
avoid losing file mode changes, copy records, binary files or empty
files creations or deletions. This behaviour can be configured with::

  [mq]
  git = auto/keep/yes/no

If set to 'keep', mq will obey the [diff] section configuration while
preserving existing git patches upon qrefresh. If set to 'yes' or
'no', mq will override the [diff] section and always generate git or
regular patches, possibly losing data in the second case.

It may be desirable for mq changesets to be kept in the secret phase (see
:hg:`help phases`), which can be enabled with the following setting::

  [mq]
  secret = True

You will by default be managing a patch queue named "patches". You can
create other, independent patch queues with the :hg:`qqueue` command.

If the working directory contains uncommitted files, qpush, qpop and
qgoto abort immediately. If -f/--force is used, the changes are
discarded. Setting::

  [mq]
  keepchanges = True

make them behave as if --keep-changes were passed, and non-conflicting
local changes will be tolerated and preserved. If incompatible options
such as -f/--force or --exact are passed, this setting is ignored.
'''

from mercurial.i18n import _
from mercurial.node import bin, hex, short, nullid, nullrev
from mercurial.lock import release
from mercurial import commands, cmdutil, hg, scmutil, util, revset
from mercurial import repair, extensions, error, phases
from mercurial import patch as patchmod
import os, re, errno, shutil

commands.norepo += " qclone"

seriesopts = [('s', 'summary', None, _('print first line of patch header'))]

cmdtable = {}
command = cmdutil.command(cmdtable)
testedwith = 'internal'

# Patch names looks like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath

class statusentry(object):
    # One line of the .hg/patches/status file: the changeset node of an
    # applied patch plus the patch's name.
    def __init__(self, node, name):
        self.node, self.name = node, name
    def __repr__(self):
        return hex(self.node) + ':' + self.name

class patchheader(object):
    # Parses and edits the header (comments, user, date, parent...) of a
    # patch file, stopping at the start of the actual diff.
    def __init__(self, pf, plainmode=False):
        def eatdiff(lines):
            # Trim trailing diff-introducing lines from the collected text.
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # Trim trailing blank lines.
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break
        message = []
        comments = []
        user = None
        date = None
        parent = None
        format = None
        subject = None
        branch = None
        nodeid = None
        diffstart = 0

        for line in file(pf):
            line = line.rstrip()
            if (line.startswith('diff --git')
                or (diffstart and line.startswith('+++ '))):
                diffstart = 2
                break
            diffstart = 0 # reset
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif line.startswith("# Parent "):
                    parent = line[9:].lstrip()
                elif line.startswith("# Branch "):
                    branch = line[9:]
                elif line.startswith("# Node ID "):
                    nodeid = line[10:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                message = []
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("Date: ") or
                                           line.startswith("date: "))):
                date = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        # Remember the exact starting line of the patch diffs before consuming
        # empty lines, for external use by TortoiseHg and others
        self.diffstartline = len(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # nodeid and branch are for external use by TortoiseHg and others
        self.nodeid = nodeid
        self.branch = branch
        self.haspatch = diffstart > 1
        self.plainmode = plainmode

    def setuser(self, user):
        # Update the user field, inserting an appropriate header line if
        # none exists yet (plain or hg-style depending on mode).
        if not self.updateheader(['From: ', '# User '], user):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# User ' + user)
            except ValueError:
                if self.plainmode or self._hasheader(['Date: ']):
                    self.comments = ['From: ' + user] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# User ' + user, '']
                    self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        # Same strategy as setuser, for the date field.
        if not self.updateheader(['Date: ', '# Date '], date):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Date ' + date)
            except ValueError:
                if self.plainmode or self._hasheader(['From: ']):
                    self.comments = ['Date: ' + date] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# Date ' + date, '']
                    self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        # Only updates an existing '# Parent ' header; never inserts one
        # unless the hg-style header block is present.
        if not self.updateheader(['# Parent '], parent):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
            except ValueError:
                pass
        self.parent = parent

    def setmessage(self, message):
        # Replace the commit message, keeping the other header comments.
        if self.comments:
            self._delmsg()
        self.message = [message]
        self.comments += self.message

    def updateheader(self, prefixes, new):
        '''Update all references to a field in the patch header.
        Return whether the field is present.'''
        res = False
        for prefix in prefixes:
            for i in xrange(len(self.comments)):
                if self.comments[i].startswith(prefix):
                    self.comments[i] = prefix + new
                    res = True
                    break
        return res

    def _hasheader(self, prefixes):
        '''Check if a header starts with any of the given prefixes.'''
        for prefix in prefixes:
            for comment in self.comments:
                if comment.startswith(prefix):
                    return True
        return False

    def __str__(self):
        if not self.comments:
            return ''
        return '\n'.join(self.comments) + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]

def newcommit(repo, phase, *args, **kwargs):
    """helper dedicated to ensure a commit respect mq.secret setting

    It should be used instead of repo.commit inside the mq source for operation
    creating new changeset.
""" repo = repo.unfiltered() if phase is None: if repo.ui.configbool('mq', 'secret', False): phase = phases.secret if phase is not None: backup = repo.ui.backupconfig('phases', 'new-commit') # Marking the repository as committing an mq patch can be used # to optimize operations like branchtags(). repo._committingpatch = True try: if phase is not None: repo.ui.setconfig('phases', 'new-commit', phase) return repo.commit(*args, **kwargs) finally: repo._committingpatch = False if phase is not None: repo.ui.restoreconfig(backup) class AbortNoCleanup(error.Abort): pass class queue(object): def __init__(self, ui, baseui, path, patchdir=None): self.basepath = path try: fh = open(os.path.join(path, 'patches.queue')) cur = fh.read().rstrip() fh.close() if not cur: curpath = os.path.join(path, 'patches') else: curpath = os.path.join(path, 'patches-' + cur) except IOError: curpath = os.path.join(path, 'patches') self.path = patchdir or curpath self.opener = scmutil.opener(self.path) self.ui = ui self.baseui = baseui self.applieddirty = False self.seriesdirty = False self.added = [] self.seriespath = "series" self.statuspath = "status" self.guardspath = "guards" self.activeguards = None self.guardsdirty = False # Handle mq.git as a bool with extended values try: gitmode = ui.configbool('mq', 'git', None) if gitmode is None: raise error.ConfigError self.gitmode = gitmode and 'yes' or 'no' except error.ConfigError: self.gitmode = ui.config('mq', 'git', 'auto').lower() self.plainmode = ui.configbool('mq', 'plain', False) @util.propertycache def applied(self): def parselines(lines): for l in lines: entry = l.split(':', 1) if len(entry) > 1: n, name = entry yield statusentry(bin(n), name) elif l.strip(): self.ui.warn(_('malformated mq status line: %s\n') % entry) # else we ignore empty lines try: lines = self.opener.read(self.statuspath).splitlines() return list(parselines(lines)) except IOError, e: if e.errno == errno.ENOENT: return [] raise @util.propertycache def 
fullseries(self): try: return self.opener.read(self.seriespath).splitlines() except IOError, e: if e.errno == errno.ENOENT: return [] raise @util.propertycache def series(self): self.parseseries() return self.series @util.propertycache def seriesguards(self): self.parseseries() return self.seriesguards def invalidate(self): for a in 'applied fullseries series seriesguards'.split(): if a in self.__dict__: delattr(self, a) self.applieddirty = False self.seriesdirty = False self.guardsdirty = False self.activeguards = None def diffopts(self, opts={}, patchfn=None): diffopts = patchmod.diffopts(self.ui, opts) if self.gitmode == 'auto': diffopts.upgrade = True elif self.gitmode == 'keep': pass elif self.gitmode in ('yes', 'no'): diffopts.git = self.gitmode == 'yes' else: raise util.Abort(_('mq.git option can be auto/keep/yes/no' ' got %s') % self.gitmode) if patchfn: diffopts = self.patchopts(diffopts, patchfn) return diffopts def patchopts(self, diffopts, *patches): """Return a copy of input diff options with git set to true if referenced patch is a git patch and should be preserved as such. 
""" diffopts = diffopts.copy() if not diffopts.git and self.gitmode == 'keep': for patchfn in patches: patchf = self.opener(patchfn, 'r') # if the patch was a git patch, refresh it as a git patch for line in patchf: if line.startswith('diff --git'): diffopts.git = True break patchf.close() return diffopts def join(self, *p): return os.path.join(self.path, *p) def findseries(self, patch): def matchpatch(l): l = l.split('#', 1)[0] return l.strip() == patch for index, l in enumerate(self.fullseries): if matchpatch(l): return index return None guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)') def parseseries(self): self.series = [] self.seriesguards = [] for l in self.fullseries: h = l.find('#') if h == -1: patch = l comment = '' elif h == 0: continue else: patch = l[:h] comment = l[h:] patch = patch.strip() if patch: if patch in self.series: raise util.Abort(_('%s appears more than once in %s') % (patch, self.join(self.seriespath))) self.series.append(patch) self.seriesguards.append(self.guard_re.findall(comment)) def checkguard(self, guard): if not guard: return _('guard cannot be an empty string') bad_chars = '# \t\r\n\f' first = guard[0] if first in '-+': return (_('guard %r starts with invalid character: %r') % (guard, first)) for c in bad_chars: if c in guard: return _('invalid character in guard %r: %r') % (guard, c) def setactive(self, guards): for guard in guards: bad = self.checkguard(guard) if bad: raise util.Abort(bad) guards = sorted(set(guards)) self.ui.debug('active guards: %s\n' % ' '.join(guards)) self.activeguards = guards self.guardsdirty = True def active(self): if self.activeguards is None: self.activeguards = [] try: guards = self.opener.read(self.guardspath).split() except IOError, err: if err.errno != errno.ENOENT: raise guards = [] for i, guard in enumerate(guards): bad = self.checkguard(guard) if bad: self.ui.warn('%s:%d: %s\n' % (self.join(self.guardspath), i + 1, bad)) else: self.activeguards.append(guard) return 
self.activeguards def setguards(self, idx, guards): for g in guards: if len(g) < 2: raise util.Abort(_('guard %r too short') % g) if g[0] not in '-+': raise util.Abort(_('guard %r starts with invalid char') % g) bad = self.checkguard(g[1:]) if bad: raise util.Abort(bad) drop = self.guard_re.sub('', self.fullseries[idx]) self.fullseries[idx] = drop + ''.join([' #' + g for g in guards]) self.parseseries() self.seriesdirty = True def pushable(self, idx): if isinstance(idx, str): idx = self.series.index(idx) patchguards = self.seriesguards[idx] if not patchguards: return True, None guards = self.active() exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards] if exactneg: return False, repr(exactneg[0]) pos = [g for g in patchguards if g[0] == '+'] exactpos = [g for g in pos if g[1:] in guards] if pos: if exactpos: return True, repr(exactpos[0]) return False, ' '.join(map(repr, pos)) return True, '' def explainpushable(self, idx, all_patches=False): write = all_patches and self.ui.write or self.ui.warn if all_patches or self.ui.verbose: if isinstance(idx, str): idx = self.series.index(idx) pushable, why = self.pushable(idx) if all_patches and pushable: if why is None: write(_('allowing %s - no guards in effect\n') % self.series[idx]) else: if not why: write(_('allowing %s - no matching negative guards\n') % self.series[idx]) else: write(_('allowing %s - guarded by %s\n') % (self.series[idx], why)) if not pushable: if why: write(_('skipping %s - guarded by %s\n') % (self.series[idx], why)) else: write(_('skipping %s - no matching guards\n') % self.series[idx]) def savedirty(self): def writelist(items, path): fp = self.opener(path, 'w') for i in items: fp.write("%s\n" % i) fp.close() if self.applieddirty: writelist(map(str, self.applied), self.statuspath) self.applieddirty = False if self.seriesdirty: writelist(self.fullseries, self.seriespath) self.seriesdirty = False if self.guardsdirty: writelist(self.activeguards, self.guardspath) self.guardsdirty = 
False if self.added: qrepo = self.qrepo() if qrepo: qrepo[None].add(f for f in self.added if f not in qrepo[None]) self.added = [] def removeundo(self, repo): undo = repo.sjoin('undo') if not os.path.exists(undo): return try: os.unlink(undo) except OSError, inst: self.ui.warn(_('error removing undo: %s\n') % str(inst)) def backup(self, repo, files, copy=False): # backup local changes in --force case for f in sorted(files): absf = repo.wjoin(f) if os.path.lexists(absf): self.ui.note(_('saving current version of %s as %s\n') % (f, f + '.orig')) if copy: util.copyfile(absf, absf + '.orig') else: util.rename(absf, absf + '.orig') def printdiff(self, repo, diffopts, node1, node2=None, files=None, fp=None, changes=None, opts={}): stat = opts.get('stat') m = scmutil.match(repo[node1], files, opts) cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m, changes, stat, fp) def mergeone(self, repo, mergeq, head, patch, rev, diffopts): # first try just applying the patch (err, n) = self.apply(repo, [patch], update_status=False, strict=True, merge=rev) if err == 0: return (err, n) if n is None: raise util.Abort(_("apply failed for patch %s") % patch) self.ui.warn(_("patch didn't work out, merging %s\n") % patch) # apply failed, strip away that rev and merge. 
hg.clean(repo, head) self.strip(repo, [n], update=False, backup='strip') ctx = repo[rev] ret = hg.merge(repo, rev) if ret: raise util.Abort(_("update returned %d") % ret) n = newcommit(repo, None, ctx.description(), ctx.user(), force=True) if n is None: raise util.Abort(_("repo commit failed")) try: ph = patchheader(mergeq.join(patch), self.plainmode) except Exception: raise util.Abort(_("unable to read %s") % patch) diffopts = self.patchopts(diffopts, patch) patchf = self.opener(patch, "w") comments = str(ph) if comments: patchf.write(comments) self.printdiff(repo, diffopts, head, n, fp=patchf) patchf.close() self.removeundo(repo) return (0, n) def qparents(self, repo, rev=None): if rev is None: (p1, p2) = repo.dirstate.parents() if p2 == nullid: return p1 if not self.applied: return None return self.applied[-1].node p1, p2 = repo.changelog.parents(rev) if p2 != nullid and p2 in [x.node for x in self.applied]: return p2 return p1 def mergepatch(self, repo, mergeq, series, diffopts): if not self.applied: # each of the patches merged in will have two parents. This # can confuse the qrefresh, qdiff, and strip code because it # needs to know which parent is actually in the patch queue. # so, we insert a merge marker with only one parent. 
This way # the first patch in the queue is never a merge patch # pname = ".hg.patches.merge.marker" n = newcommit(repo, None, '[mq]: merge marker', force=True) self.removeundo(repo) self.applied.append(statusentry(n, pname)) self.applieddirty = True head = self.qparents(repo) for patch in series: patch = mergeq.lookup(patch, strict=True) if not patch: self.ui.warn(_("patch %s does not exist\n") % patch) return (1, None) pushable, reason = self.pushable(patch) if not pushable: self.explainpushable(patch, all_patches=True) continue info = mergeq.isapplied(patch) if not info: self.ui.warn(_("patch %s is not applied\n") % patch) return (1, None) rev = info[1] err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts) if head: self.applied.append(statusentry(head, patch)) self.applieddirty = True if err: return (err, head) self.savedirty() return (0, head) def patch(self, repo, patchfile): '''Apply patchfile to the working directory. patchfile: name of patch file''' files = set() try: fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1, files=files, eolmode=None) return (True, list(files), fuzz) except Exception, inst: self.ui.note(str(inst) + '\n') if not self.ui.verbose: self.ui.warn(_("patch failed, unable to continue (try -v)\n")) self.ui.traceback() return (False, list(files), False) def apply(self, repo, series, list=False, update_status=True, strict=False, patchdir=None, merge=None, all_files=None, tobackup=None, keepchanges=False): wlock = lock = tr = None try: wlock = repo.wlock() lock = repo.lock() tr = repo.transaction("qpush") try: ret = self._apply(repo, series, list, update_status, strict, patchdir, merge, all_files=all_files, tobackup=tobackup, keepchanges=keepchanges) tr.close() self.savedirty() return ret except AbortNoCleanup: tr.close() self.savedirty() return 2, repo.dirstate.p1() except: # re-raises try: tr.abort() finally: repo.invalidate() repo.dirstate.invalidate() self.invalidate() raise finally: release(tr, lock, wlock) 
            self.removeundo(repo)

    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files=None,
               tobackup=None, keepchanges=False):
        """returns (error, hash)

        error = 1 for unable to read, 2 for patch failed, 3 for patch
        fuzz. tobackup is None or a set of files to backup before they
        are modified by a patch.
        """
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explainpushable(patchname, all_patches=True)
                continue
            self.ui.status(_("applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname), self.plainmode)
            except IOError:
                self.ui.warn(_("unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                # The commit message should not be translated
                message = "imported patch %s\n" % patchname
            else:
                if list:
                    # The commit message should not be translated
                    message.append("\nimported patch %s" % patchname)
                message = '\n'.join(message)

            if ph.haspatch:
                if tobackup:
                    # back up local modifications the patch will touch
                    touched = patchmod.changedfiles(self.ui, repo, pf)
                    touched = set(touched) & tobackup
                    if touched and keepchanges:
                        raise AbortNoCleanup(
                            _("local changes found, refresh first"))
                    self.backup(repo, touched, copy=True)
                    tobackup = tobackup - touched
                (patcherr, files, fuzz) = self.patch(repo, pf)
                if all_files is not None:
                    all_files.update(files)
                patcherr = not patcherr
            else:
                self.ui.warn(_("patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.lexists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                for f in removed:
                    repo.dirstate.remove(f)
                for f in merged:
                    repo.dirstate.merge(f)
                p1, p2 = repo.dirstate.parents()
                repo.setparents(p1, merge)

            match = scmutil.matchfiles(repo, files or [])
            oldtip = repo['tip']
            n = newcommit(repo, None, message, ph.user, ph.date, match=match,
                          force=True)
            if repo['tip'] == oldtip:
                raise util.Abort(_("qpush exactly duplicates child changeset"))
            if n is None:
                raise util.Abort(_("repository commit failed"))

            if update_status:
                self.applied.append(statusentry(n, patchname))

            if patcherr:
                self.ui.warn(_("patch failed, rejects left in working "
                               "dir\n"))
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_("fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)

    def _cleanup(self, patches, numrevs, keep=False):
        """Forget/unlink patch files and drop queue bookkeeping.

        Removes the first `numrevs` applied entries and every name in
        `patches` from the full series; returns the nodes of the entries
        that were finalized.
        """
        if not keep:
            r = self.qrepo()
            if r:
                r[None].forget(patches)
            for p in patches:
                try:
                    os.unlink(self.join(p))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise

        qfinished = []
        if numrevs:
            qfinished = self.applied[:numrevs]
            del self.applied[:numrevs]
            self.applieddirty = True

        unknown = []

        # delete in reverse index order so earlier deletions don't shift
        # the remaining indexes
        for (i, p) in sorted([(self.findseries(p), p) for p in patches],
                             reverse=True):
            if i is not None:
                del self.fullseries[i]
            else:
                unknown.append(p)

        if unknown:
            if numrevs:
                rev = dict((entry.name, entry.node) for entry in qfinished)
                for p in unknown:
                    msg = _('revision %s refers to unknown patches: %s\n')
                    self.ui.warn(msg % (short(rev[p]), p))
            else:
                msg = _('unknown patches: %s\n')
                raise util.Abort(''.join(msg % p for p in unknown))

        self.parseseries()
        self.seriesdirty = True
        return [entry.node for entry in qfinished]

    def _revpatches(self, repo, revs):
        """Map sorted revisions back to their applied patch names.

        Aborts if a rev is below the first applied patch or is not
        exactly the i-th applied entry (i.e. not a contiguous prefix).
        """
        firstrev = repo[self.applied[0].node].rev()
        patches = []
        for i, rev in enumerate(revs):

            if rev < firstrev:
                raise util.Abort(_('revision %d is not managed') % rev)

            ctx = repo[rev]
            base = self.applied[i].node
            if ctx.node() != base:
                msg = _('cannot delete revision %d above applied patches')
                raise util.Abort(msg % rev)

            patch = self.applied[i].name
            for fmt in ('[mq]: %s', 'imported patch %s'):
                if ctx.description() == fmt % patch:
                    msg = _('patch %s finalized without changeset message\n')
                    repo.ui.status(msg % patch)
                    break

            patches.append(patch)
        return patches

    def finish(self, repo, revs):
        """Move the given applied revisions into permanent history."""
        # Manually trigger phase computation to
        # ensure phasedefaults is
        # executed before we remove the patches.
        repo._phasecache
        patches = self._revpatches(repo, sorted(revs))
        qfinished = self._cleanup(patches, len(patches))
        if qfinished and repo.ui.configbool('mq', 'secret', False):
            # only use this logic when the secret option is added
            oldqbase = repo[qfinished[0]]
            tphase = repo.ui.config('phases', 'new-commit', phases.draft)
            if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
                phases.advanceboundary(repo, tphase, qfinished)

    def delete(self, repo, patches, opts):
        """Remove unapplied patches (and optionally --rev revisions)."""
        if not patches and not opts.get('rev'):
            raise util.Abort(_('qdelete requires at least one revision or '
                               'patch name'))

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            if patch not in realpatches:
                realpatches.append(patch)

        numrevs = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = scmutil.revrange(repo, opts.get('rev'))
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            revpatches = self._revpatches(repo, revs)
            realpatches += revpatches
            numrevs = len(revpatches)

        self._cleanup(realpatches, numrevs, opts.get('keep'))

    def checktoppatch(self, repo):
        '''check that working directory is at qtip'''
        if self.applied:
            top = self.applied[-1].node
            patch = self.applied[-1].name
            if repo.dirstate.p1() != top:
                raise util.Abort(_("working directory revision is not qtip"))
            return top, patch
        return None, None

    def checksubstate(self, repo, baserev=None):
        '''return list of subrepos at a different revision than substate.
        Abort if any subrepos have uncommitted changes.'''
        inclsubs = []
        wctx = repo[None]
        if baserev:
            bctx = repo[baserev]
        else:
            bctx = wctx.parents()[0]
        for s in sorted(wctx.substate):
            if wctx.sub(s).dirty(True):
                raise util.Abort(
                    _("uncommitted changes in subrepository %s") % s)
            elif s not in bctx.substate or bctx.sub(s).dirty():
                inclsubs.append(s)
        return inclsubs

    def putsubstate2changes(self, substatestate, changes):
        """Inject '.hgsubstate' into the right (m, a, r) status bucket."""
        for files in changes[:3]:
            if '.hgsubstate' in files:
                return # already listed up
        # not yet listed up
        if substatestate in 'a?':
            changes[1].append('.hgsubstate')
        elif substatestate in 'r':
            changes[2].append('.hgsubstate')
        else: # modified
            changes[0].append('.hgsubstate')

    def localchangesfound(self, refresh=True):
        """Abort because of local changes, hinting at qrefresh if relevant."""
        if refresh:
            raise util.Abort(_("local changes found, refresh first"))
        else:
            raise util.Abort(_("local changes found"))

    def checklocalchanges(self, repo, force=False, refresh=True):
        """Abort on uncommitted changes unless forced; return (m, a, r, d)."""
        m, a, r, d = repo.status()[:4]
        if (m or a or r or d) and not force:
            self.localchangesfound(refresh)
        return m, a, r, d

    # patch names that would collide with queue control files
    _reserved = ('series', 'status', 'guards', '.', '..')
    def checkreservedname(self, name):
        if name in self._reserved:
            raise util.Abort(_('"%s" cannot be used as the name of a patch')
                             % name)
        for prefix in ('.hg', '.mq'):
            if name.startswith(prefix):
                raise util.Abort(_('patch name cannot begin with "%s"')
                                 % prefix)
        for c in ('#', ':'):
            if c in name:
                raise util.Abort(_('"%s" cannot be used in the name of a patch')
                                 % c)

    def checkpatchname(self, name, force=False):
        """Validate a new patch name and refuse to clobber existing files."""
        self.checkreservedname(name)
        if not force and os.path.exists(self.join(name)):
            if os.path.isdir(self.join(name)):
                raise util.Abort(_('"%s" already exists as a directory')
                                 % name)
            else:
                raise util.Abort(_('patch "%s" already exists') % name)

    def checkkeepchanges(self, keepchanges, force):
        if force and keepchanges:
            raise util.Abort(_('cannot use both --force and --keep-changes'))

    def new(self, repo, patchfn, *pats, **opts):
        """options:
        msg: a string or a no-argument function returning a string
        """
        msg = opts.get('msg')
        user = opts.get('user')
        date = opts.get('date')
        if date:
            date = util.parsedate(date)
        diffopts = self.diffopts({'git': opts.get('git')})
        if opts.get('checkname', True):
            self.checkpatchname(patchfn)
        inclsubs = self.checksubstate(repo)
        if inclsubs:
            inclsubs.append('.hgsubstate')
            substatestate = repo.dirstate['.hgsubstate']
        if opts.get('include') or opts.get('exclude') or pats:
            if inclsubs:
                pats = list(pats or []) + inclsubs
            match = scmutil.match(repo[None], pats, opts)
            # detect missing files in pats
            def badfn(f, msg):
                if f != '.hgsubstate': # .hgsubstate is auto-created
                    raise util.Abort('%s: %s' % (f, msg))
            match.bad = badfn
            changes = repo.status(match=match)
            m, a, r, d = changes[:4]
        else:
            changes = self.checklocalchanges(repo, force=True)
            m, a, r, d = changes
        match = scmutil.matchfiles(repo, m + a + r + inclsubs)
        if len(repo[None].parents()) > 1:
            raise util.Abort(_('cannot manage merge changesets'))
        commitfiles = m + a + r
        self.checktoppatch(repo)
        insert = self.fullseriesend()
        wlock = repo.wlock()
        try:
            try:
                # if patch file write fails, abort early
                p = self.opener(patchfn, "w")
            except IOError, e:
                raise util.Abort(_('cannot write patch "%s": %s')
                                 % (patchfn, e.strerror))
            try:
                # write the patch header (plain or HG-changeset style)
                if self.plainmode:
                    if user:
                        p.write("From: " + user + "\n")
                        if not date:
                            p.write("\n")
                    if date:
                        p.write("Date: %d %d\n\n" % date)
                else:
                    p.write("# HG changeset patch\n")
                    p.write("# Parent "
                            + hex(repo[None].p1().node()) + "\n")
                    if user:
                        p.write("# User " + user + "\n")
                    if date:
                        p.write("# Date %s %s\n\n" % date)
                if util.safehasattr(msg, '__call__'):
                    msg = msg()
                commitmsg = msg and msg or ("[mq]: %s" % patchfn)
                n = newcommit(repo, None, commitmsg, user, date, match=match,
                              force=True)
                if n is None:
                    raise util.Abort(_("repo commit failed"))
                try:
                    self.fullseries[insert:insert] = [patchfn]
                    self.applied.append(statusentry(n, patchfn))
                    self.parseseries()
                    self.seriesdirty = True
                    self.applieddirty = True
                    if msg:
                        msg = msg + "\n\n"
                        p.write(msg)
                    if commitfiles:
                        parent = self.qparents(repo, n)
                        if inclsubs:
                            self.putsubstate2changes(substatestate, changes)
                        chunks = patchmod.diff(repo, node1=parent, node2=n,
                                               changes=changes, opts=diffopts)
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    r = self.qrepo()
                    if r:
                        r[None].add([patchfn])
                except: # re-raises
                    repo.rollback()
                    raise
            except Exception:
                # commit or patch write failed: remove the half-written
                # patch file before propagating
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except OSError:
                    self.ui.warn(_('error unlinking %s\n') % patchpath)
                raise
            self.removeundo(repo)
        finally:
            release(wlock)

    def strip(self, repo, revs, update=True, backup="all", force=None):
        """Strip revisions from history, optionally updating away first."""
        wlock = lock = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()

            if update:
                self.checklocalchanges(repo, force=force, refresh=False)
                urev = self.qparents(repo, revs[0])
                hg.clean(repo, urev)
                repo.dirstate.write()

            repair.strip(self.ui, repo, revs, backup)
        finally:
            release(lock, wlock)

    def isapplied(self, patch):
        """returns (index, rev, patch)"""
        for i, a in enumerate(self.applied):
            if a.name == patch:
                return (i, a.node, a.name)
        return None

    # if the exact patch name does not exist, we try a few
    # variations.
    # If strict is passed, we try only #1
    #
    # 1) a number (as string) to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        def partialname(s):
            # exact name, else unique substring, else qtip/qbase aliases
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn('  %s\n' % m)
                return None
            if matches:
                return matches[0]
            if self.series and self.applied:
                if s == 'qtip':
                    return self.series[self.seriesend(True) - 1]
                if s == 'qbase':
                    return self.series[0]
            return None

        if patch in self.series:
            return patch

        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except (ValueError, OverflowError):
                pass
            else:
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

            if not strict:
                res = partialname(patch)
                if res:
                    return res
                minus = patch.rfind('-')
                if minus >= 0:
                    res = partialname(patch[:minus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[minus + 1:] or 1)
                        except (ValueError, OverflowError):
                            pass
                        else:
                            if i - off >= 0:
                                return self.series[i - off]
                plus = patch.rfind('+')
                if plus >= 0:
                    res = partialname(patch[:plus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[plus + 1:] or 1)
                        except (ValueError, OverflowError):
                            pass
                        else:
                            if i + off < len(self.series):
                                return self.series[i + off]
        raise util.Abort(_("patch %s not in series") % patch)

    def push(self, repo, patch=None, force=False, list=False, mergeq=None,
             all=False, move=False, exact=False, nobackup=False,
             keepchanges=False):
        """Apply the next patch(es) in the series (implements qpush)."""
        self.checkkeepchanges(keepchanges, force)
        diffopts = self.diffopts()
        wlock = repo.wlock()
        try:
            heads = []
            for b, ls in repo.branchmap().iteritems():
                heads += ls
            if not heads:
                heads = [nullid]
            if repo.dirstate.p1() not in heads and not exact:
                self.ui.status(_("(working directory not at a head)\n"))

            if not self.series:
                self.ui.warn(_('no patches in series\n'))
                return 0

            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                patch = self.lookup(patch)
                info = self.isapplied(patch)
                if info and info[0] >= len(self.applied) - 1:
                    self.ui.warn(
                        _('qpush: %s is already at the top\n') % patch)
                    return 0

                pushable, reason = self.pushable(patch)
                if pushable:
                    if self.series.index(patch) < self.seriesend():
                        raise util.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                else:
                    if reason:
                        reason = _('guarded by %s') % reason
                    else:
                        reason = _('no matching guards')
                    self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_('all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..."
to # work as it detects an error when done start = self.seriesend() if start == len(self.series): self.ui.warn(_('patch series already fully applied\n')) return 1 if not force and not keepchanges: self.checklocalchanges(repo, refresh=self.applied) if exact: if keepchanges: raise util.Abort( _("cannot use --exact and --keep-changes together")) if move: raise util.Abort(_('cannot use --exact and --move ' 'together')) if self.applied: raise util.Abort(_('cannot push --exact with applied ' 'patches')) root = self.series[start] target = patchheader(self.join(root), self.plainmode).parent if not target: raise util.Abort( _("%s does not have a parent recorded") % root) if not repo[target] == repo['.']: hg.update(repo, target) if move: if not patch: raise util.Abort(_("please specify the patch to move")) for fullstart, rpn in enumerate(self.fullseries): # strip markers for patch guards if self.guard_re.split(rpn, 1)[0] == self.series[start]: break for i, rpn in enumerate(self.fullseries[fullstart:]): # strip markers for patch guards if self.guard_re.split(rpn, 1)[0] == patch: break index = fullstart + i assert index < len(self.fullseries) fullpatch = self.fullseries[index] del self.fullseries[index] self.fullseries.insert(fullstart, fullpatch) self.parseseries() self.seriesdirty = True self.applieddirty = True if start > 0: self.checktoppatch(repo) if not patch: patch = self.series[start] end = start + 1 else: end = self.series.index(patch, start) + 1 tobackup = set() if (not nobackup and force) or keepchanges: m, a, r, d = self.checklocalchanges(repo, force=True) if keepchanges: tobackup.update(m + a + r + d) else: tobackup.update(m + a) s = self.series[start:end] all_files = set() try: if mergeq: ret = self.mergepatch(repo, mergeq, s, diffopts) else: ret = self.apply(repo, s, list, all_files=all_files, tobackup=tobackup, keepchanges=keepchanges) except: # re-raises self.ui.warn(_('cleaning up working directory...')) node = repo.dirstate.p1() hg.revert(repo, node, None) # 
                # only remove unknown files that we know we touched or
                # created while patching
                for f in all_files:
                    if f not in repo.dirstate:
                        util.unlinkpath(repo.wjoin(f), ignoremissing=True)
                self.ui.warn(_('done\n'))
                raise

            if not self.applied:
                return ret[0]
            top = self.applied[-1].name
            if ret[0] and ret[0] > 1:
                msg = _("errors during apply, please fix and refresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_("now at: %s\n") % top)
            return ret[0]

        finally:
            wlock.release()

    def pop(self, repo, patch=None, force=False, update=True, all=False,
            nobackup=False, keepchanges=False):
        """Unapply patches down to (and including) `patch` (implements qpop)."""
        self.checkkeepchanges(keepchanges, force)
        wlock = repo.wlock()
        try:
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                info = self.isapplied(patch)
                if not info:
                    raise util.Abort(_("patch %s is not applied") % patch)

            if not self.applied:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                parents = repo.dirstate.parents()
                rr = [x.node for x in self.applied]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                parents = [p.node() for p in repo[None].parents()]
                needupdate = False
                for entry in self.applied[start:]:
                    if entry.node in parents:
                        needupdate = True
                        break
                update = needupdate

            tobackup = set()
            if update:
                m, a, r, d = self.checklocalchanges(
                    repo, force=force or keepchanges)
                if force:
                    if not nobackup:
                        tobackup.update(m + a)
                elif keepchanges:
                    tobackup.update(m + a + r + d)

            self.applieddirty = True
            end = len(self.applied)
            rev = self.applied[start].node

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise util.Abort(_('trying to pop unknown node %s') % node)

            if heads != [self.applied[-1].node]:
                raise util.Abort(_("popping would remove a revision not "
                                   "managed by this patch queue"))
            if not repo[self.applied[-1].node].mutable():
                raise util.Abort(
                    _("popping would remove an immutable revision"),
                    hint=_('see "hg help phases" for details'))

            # we know there are no local changes, so we can make a
            # simplified form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                ctx = repo[qp]
                m, a, r, d = repo.status(qp, '.')[:4]
                if d:
                    raise util.Abort(_("deletions found between repo revs"))

                tobackup = set(a + m + r) & tobackup
                if keepchanges and tobackup:
                    self.localchangesfound()
                self.backup(repo, tobackup)

                for f in a:
                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)
                    repo.dirstate.drop(f)
                for f in m + r:
                    fctx = ctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                    repo.dirstate.normal(f)
                repo.setparents(qp, nullid)
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_("popping %s\n") % patch.name)
            del self.applied[start:end]
            self.strip(repo, [rev], update=False, backup='strip')
            if self.applied:
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
        finally:
            wlock.release()

    def diff(self, repo, pats, opts):
        """Show a diff of the top patch (implements qdiff)."""
        top, patch = self.checktoppatch(repo)
        if not top:
            self.ui.write(_("no patches applied\n"))
            return
        qp = self.qparents(repo, top)
        if opts.get('reverse'):
            node1, node2 = None, qp
        else:
            node1, node2 = qp, None
        diffopts = self.diffopts(opts, patch)
        self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)

    def refresh(self, repo, pats=None, **opts):
        """Fold working-directory changes into the top patch (qrefresh)."""
        if not self.applied:
            self.ui.write(_("no patches applied\n"))
            return 1
        msg = opts.get('msg', '').rstrip()
        newuser = opts.get('user')
        newdate = opts.get('date')
        if newdate:
            newdate = '%d %d' % util.parsedate(newdate)
        wlock = repo.wlock()

        try:
            self.checktoppatch(repo)
            (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
            if repo.changelog.heads(top) != [top]:
                raise util.Abort(_("cannot refresh a revision with children"))
            if not repo[top].mutable():
                raise util.Abort(_("cannot refresh immutable revision"),
                                 hint=_('see "hg help phases" for details'))

            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)

            inclsubs = self.checksubstate(repo, hex(patchparent))
            if inclsubs:
                inclsubs.append('.hgsubstate')
                substatestate = repo.dirstate['.hgsubstate']

            ph = patchheader(self.join(patchfn), self.plainmode)
            diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
            if msg:
                ph.setmessage(msg)
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)
            ph.setparent(hex(patchparent))

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, 'w', atomictemp=True)

            comments = str(ph)
            if comments:
                patchf.write(comments)

            # update the dirstate in place, strip off the qtip commit
            # and then commit.
            #
            # this should really read:
            #   mm, dd, aa = repo.status(top, patchparent)[:3]
            # but we do it backwards to take advantage of manifest/changelog
            # caching against the next repo.status call
            mm, aa, dd = repo.status(patchparent, top)[:3]
            changes = repo.changelog.read(top)
            man = repo.manifest.read(changes[0])
            aaa = aa[:]
            matchfn = scmutil.match(repo[None], pats, opts)
            # in short mode, we only diff the files included in the
            # patch already plus specified files
            if opts.get('short'):
                # if amending a patch, we start with existing
                # files plus specified files - unfiltered
                match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files())
                # filter with include/exclude options
                matchfn = scmutil.match(repo[None], opts=opts)
            else:
                match = scmutil.matchall(repo)
            m, a, r, d = repo.status(match=match)[:4]
            mm = set(mm)
            aa = set(aa)
            dd = set(dd)

            # we might end up with files that were added between
            # qtip and the dirstate parent, but then changed in the
            # local dirstate. in this case, we want them to only
            # show up in the added section
            for x in m:
                if x not in aa:
                    mm.add(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch. In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    dd.remove(x)
                    mm.add(x)
                else:
                    aa.add(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    aa.remove(x)
                    forget.append(x)
                    continue
                else:
                    mm.discard(x)
                dd.add(x)

            m = list(mm)
            r = list(dd)
            a = list(aa)

            # create 'match' that includes the files to be recommitted.
            # apply matchfn via repo.status to ensure correct case handling.
            cm, ca, cr, cd = repo.status(patchparent, match=matchfn)[:4]
            allmatches = set(cm + ca + cr + cd)
            refreshchanges = [x.intersection(allmatches)
                              for x in (mm, aa, dd)]

            files = set(inclsubs)
            for x in refreshchanges:
                files.update(x)
            match = scmutil.matchfiles(repo, files)

            bmlist = repo[top].bookmarks()

            try:
                if diffopts.git or diffopts.upgrade:
                    copies = {}
                    for dst in a:
                        src = repo.dirstate.copied(dst)
                        # during qfold, the source file for copies may
                        # be removed. Treat this as a simple add.
                        if src is not None and src in repo.dirstate:
                            copies.setdefault(src, []).append(dst)
                        repo.dirstate.add(dst)
                    # remember the copies between patchparent and qtip
                    for dst in aaa:
                        f = repo.file(dst)
                        src = f.renamed(man[dst])
                        if src:
                            copies.setdefault(src[0], []).extend(
                                copies.get(dst, []))
                            if dst in a:
                                copies[src[0]].append(dst)
                        # we can't copy a file created by the patch itself
                        if dst in copies:
                            del copies[dst]
                    for src, dsts in copies.iteritems():
                        for dst in dsts:
                            repo.dirstate.copy(src, dst)
                else:
                    for dst in a:
                        repo.dirstate.add(dst)
                    # Drop useless copy information
                    for f in list(repo.dirstate.copies()):
                        repo.dirstate.copy(None, f)
                for f in r:
                    repo.dirstate.remove(f)
                # if the patch excludes a modified file, mark that
                # file with mtime=0 so status can see it.
                mm = []
                for i in xrange(len(m) - 1, -1, -1):
                    if not matchfn(m[i]):
                        mm.append(m[i])
                        del m[i]
                for f in m:
                    repo.dirstate.normal(f)
                for f in mm:
                    repo.dirstate.normallookup(f)
                for f in forget:
                    repo.dirstate.drop(f)

                if not msg:
                    if not ph.message:
                        message = "[mq]: %s\n" % patchfn
                    else:
                        message = "\n".join(ph.message)
                else:
                    message = msg

                user = ph.user or changes[1]

                oldphase = repo[top].phase()

                # assumes strip can roll itself back if interrupted
                repo.setparents(*cparents)
                self.applied.pop()
                self.applieddirty = True
                self.strip(repo, [top], update=False, backup='strip')
            except: # re-raises
                repo.dirstate.invalidate()
                raise

            try:
                # might be nice to attempt to roll back strip after this

                # Ensure we create a new changeset in the same phase than
                # the old one.
                n = newcommit(repo, oldphase, message, user, ph.date,
                              match=match, force=True)
                # only write patch after a successful commit
                c = [list(x) for x in refreshchanges]
                if inclsubs:
                    self.putsubstate2changes(substatestate, c)
                chunks = patchmod.diff(repo, patchparent,
                                       changes=c, opts=diffopts)
                for chunk in chunks:
                    patchf.write(chunk)
                patchf.close()

                marks = repo._bookmarks
                for bm in bmlist:
                    marks[bm] = n
                marks.write()

                self.applied.append(statusentry(n, patchfn))
            except: # re-raises
                ctx = repo[cparents[0]]
                repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                self.savedirty()
                self.ui.warn(_('refresh interrupted while patch was popped! '
                               '(revert --all, qpush to recover)\n'))
                raise
        finally:
            wlock.release()
            self.removeundo(repo)

    def init(self, repo, create=False):
        """Create the patch directory (and optionally a queue repository)."""
        if not create and os.path.isdir(self.path):
            raise util.Abort(_("patch queue directory already exists"))
        try:
            os.mkdir(self.path)
        except OSError, inst:
            if inst.errno != errno.EEXIST or not create:
                raise
        if create:
            return self.qrepo(create=True)

    def unapplied(self, repo, patch=None):
        """Return [(index, name)] of pushable patches after `patch`/qtip."""
        if patch and patch not in self.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        if not patch:
            start = self.seriesend()
        else:
            start = self.series.index(patch) + 1
        unapplied = []
        for i in xrange(start, len(self.series)):
            pushable, reason = self.pushable(i)
            if pushable:
                unapplied.append((i, self.series[i]))
            self.explainpushable(i)
        return unapplied

    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print the series, or (with missing) patch files not in it."""
        def displayname(pfx, patchname, state):
            if pfx:
                self.ui.write(pfx)
            if summary:
                ph = patchheader(self.join(patchname), self.plainmode)
                msg = ph.message and ph.message[0] or ''
                if self.ui.formatted():
                    width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
                    if width > 0:
                        msg = util.ellipsis(msg, width)
                    else:
                        msg = ''
                self.ui.write(patchname, label='qseries.' + state)
                self.ui.write(': ')
                self.ui.write(msg, label='qseries.message.' + state)
            else:
                self.ui.write(patchname, label='qseries.' + state)
            self.ui.write('\n')

        applied = set([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                idxwidth = len(str(start + length - 1))
            for i in xrange(start, start + length):
                patch = self.series[i]
                if patch in applied:
                    char, state = 'A', 'applied'
                elif self.pushable(i)[0]:
                    char, state = 'U', 'unapplied'
                else:
                    char, state = 'G', 'guarded'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%*d %s ' % (idxwidth, i, char)
                elif status and status != char:
                    continue
                displayname(pfx, patch, state)
        else:
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.statuspath, self.seriespath,
                                   self.guardspath)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = self.ui.verbose and ('D ') or ''
                displayname(pfx, x, 'missing')

    def issaveline(self, l):
        # returns True for a qsave marker entry, None otherwise
        if l.name == '.hg.patches.save.line':
            return True

    def qrepo(self, create=False):
        """Return the versioned patch-directory repository, if any."""
        ui = self.baseui.copy()
        if create or os.path.isdir(self.join(".hg")):
            return hg.repository(ui, path=self.path, create=create)

    def restore(self, repo, rev, delete=None, qupdate=None):
        """Restore queue state saved by save() in changeset `rev`."""
        desc = repo[rev].description().strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i, line in enumerate(lines):
            if line == 'Patch Data:':
                datastart = i + 1
            elif line.startswith('Dirstate:'):
                l = line.rstrip()
                l = l[10:].split(' ')
                qpp = [bin(x) for x in l]
            elif datastart is not None:
                l = line.rstrip()
                n, name = l.split(':', 1)
                if n:
                    applied.append(statusentry(bin(n), name))
                else:
                    series.append(l)

        if datastart is None:
            self.ui.warn(_("no saved patch data found\n"))
            return 1
        self.ui.warn(_("restoring status: %s\n") % lines[0])
        self.fullseries = series
        self.applied = applied
        self.parseseries()
        self.seriesdirty = True
        self.applieddirty = True
        heads = repo.changelog.heads()

        if delete:
            if rev not in heads:
                self.ui.warn(_("save entry has children, leaving it alone\n"))
            else:
                self.ui.warn(_("removing save entry %s\n") % short(rev))
                pp = repo.dirstate.parents()
                if rev in pp:
                    update = True
                else:
                    update = False
                self.strip(repo, [rev], update=update, backup='strip')

        if qpp:
            self.ui.warn(_("saved queue repository parents: %s %s\n") %
                         (short(qpp[0]), short(qpp[1])))
            if qupdate:
                self.ui.status(_("updating queue directory\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn(_("unable to load queue repository\n"))
                    return 1
                hg.clean(r, qpp[0])

    def save(self, repo, msg=None):
        """Record the current queue state in a marker changeset."""
        if not self.applied:
            self.ui.warn(_("save: no patches applied, exiting\n"))
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn(_("status is already saved\n"))
            return 1

        if not msg:
            msg = _("hg patches saved state")
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        msg += ''.join('%s\n' % x for x in self.applied)
        msg += ''.join(':%s\n' % x for x in self.fullseries)
        n = repo.commit(msg, force=True)
        if not n:
            self.ui.warn(_("repo commit failed\n"))
            return 1
        self.applied.append(statusentry(n, '.hg.patches.save.line'))
        self.applieddirty = True
        self.removeundo(repo)

    def fullseriesend(self):
        """Index in fullseries just past the last applied patch."""
        if self.applied:
            p = self.applied[-1].name
            end = self.findseries(p)
            if end is None:
                return len(self.fullseries)
            return end + 1
        return 0

    def seriesend(self, all_patches=False):
        """If all_patches is False, return the index of the next pushable
        patch in the series, or the series length. If all_patches is
        True, return the index of the first patch past the last applied
        one.
""" end = 0 def next(start): if all_patches or start >= len(self.series): return start for i in xrange(start, len(self.series)): p, reason = self.pushable(i) if p: return i self.explainpushable(i) return len(self.series) if self.applied: p = self.applied[-1].name try: end = self.series.index(p) except ValueError: return 0 return next(end + 1) return next(end) def appliedname(self, index): pname = self.applied[index].name if not self.ui.verbose: p = pname else: p = str(self.series.index(pname)) + " " + pname return p def qimport(self, repo, files, patchname=None, rev=None, existing=None, force=None, git=False): def checkseries(patchname): if patchname in self.series: raise util.Abort(_('patch %s is already in the series file') % patchname) if rev: if files: raise util.Abort(_('option "-r" not valid when importing ' 'files')) rev = scmutil.revrange(repo, rev) rev.sort(reverse=True) elif not files: raise util.Abort(_('no files or revisions specified')) if (len(files) > 1 or len(rev) > 1) and patchname: raise util.Abort(_('option "-n" not valid when importing multiple ' 'patches')) imported = [] if rev: # If mq patches are applied, we can only import revisions # that form a linear path to qbase. # Otherwise, they should form a linear path to a head. 
            heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
            if len(heads) > 1:
                raise util.Abort(_('revision %d is the root of more than one '
                                   'branch') % rev[-1])
            if self.applied:
                base = repo.changelog.node(rev[0])
                if base in [n.node for n in self.applied]:
                    raise util.Abort(_('revision %d is already managed')
                                     % rev[0])
                if heads != [self.applied[-1].node]:
                    raise util.Abort(_('revision %d is not the parent of '
                                       'the queue') % rev[0])
                base = repo.changelog.rev(self.applied[0].node)
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev[0])]:
                    raise util.Abort(_('revision %d has unmanaged children')
                                     % rev[0])
                lastparent = None

            diffopts = self.diffopts({'git': git})
            for r in rev:
                if not repo[r].mutable():
                    raise util.Abort(_('revision %d is not mutable') % r,
                                     hint=_('see "hg help phases" '
                                            'for details'))
                p1, p2 = repo.changelog.parentrevs(r)
                n = repo.changelog.node(r)
                if p2 != nullrev:
                    raise util.Abort(_('cannot import merge revision %d')
                                     % r)
                if lastparent and lastparent != r:
                    raise util.Abort(_('revision %d is not the parent of %d')
                                     % (r, lastparent))
                lastparent = p1

                if not patchname:
                    patchname = normname('%d.diff' % r)
                checkseries(patchname)
                self.checkpatchname(patchname, force)
                self.fullseries.insert(0, patchname)

                # export the revision's diff as the patch file content
                patchf = self.opener(patchname, "w")
                cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
                patchf.close()

                se = statusentry(n, patchname)
                self.applied.insert(0, se)

                self.added.append(patchname)
                imported.append(patchname)
                patchname = None
            if rev and repo.ui.configbool('mq', 'secret', False):
                # if we added anything with --rev, we must move the secret root
                phases.retractboundary(repo, phases.secret, [n])
            self.parseseries()
            self.applieddirty = True
            self.seriesdirty = True

        for i, filename in enumerate(files):
            if existing:
                if filename == '-':
                    raise util.Abort(_('-e is incompatible with import '
                                       'from -'))
                filename = normname(filename)
                self.checkreservedname(filename)
                originpath = self.join(filename)
                if not os.path.isfile(originpath):
                    raise util.Abort(_("patch %s does not exist") % filename)

                if patchname:
                    self.checkpatchname(patchname, force)
                    self.ui.write(_('renaming %s to %s\n')
                                  % (filename, patchname))
                    util.rename(originpath, self.join(patchname))
                else:
                    patchname = filename
            else:
                if filename == '-' and not patchname:
                    raise util.Abort(_('need --name to import a patch '
                                       'from -'))
                elif not patchname:
                    patchname = normname(os.path.basename(
                        filename.rstrip('/')))
                self.checkpatchname(patchname, force)
                try:
                    if filename == '-':
                        text = self.ui.fin.read()
                    else:
                        fp = hg.openpath(self.ui, filename)
                        text = fp.read()
                        fp.close()
                except (OSError, IOError):
                    raise util.Abort(_("unable to read file %s") % filename)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
                patchf.close()
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                index = self.fullseriesend() + i
                self.fullseries[index:index] = [patchname]
                self.parseseries()
                self.seriesdirty = True
            self.ui.warn(_("adding %s to series file\n") % patchname)
            self.added.append(patchname)
            imported.append(patchname)
            patchname = None

        self.removeundo(repo)
        return imported

def fixkeepchangesopts(ui, opts):
    """Turn on --keep-changes when the mq.keepchanges config asks for it."""
    if (not ui.configbool('mq', 'keepchanges') or opts.get('force')
        or opts.get('exact')):
        return opts
    opts = dict(opts)
    opts['keep_changes'] = True
    return opts

@command("qdelete|qremove|qrm",
         [('k', 'keep', None, _('keep patch file')),
          ('r', 'rev', [],
           _('stop managing a revision (DEPRECATED)'), _('REV'))],
         _('hg qdelete [-k] [PATCH]...'))
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. Exact
    patch identifiers must be given. With -k/--keep, the patch files are
    preserved in the patch directory.
To stop managing a patch and move it into permanent history, use the :hg:`qfinish` command.""" q = repo.mq q.delete(repo, patches, opts) q.savedirty() return 0 @command("qapplied", [('1', 'last', None, _('show only the preceding applied patch')) ] + seriesopts, _('hg qapplied [-1] [-s] [PATCH]')) def applied(ui, repo, patch=None, **opts): """print the patches already applied Returns 0 on success.""" q = repo.mq if patch: if patch not in q.series: raise util.Abort(_("patch %s is not in series file") % patch) end = q.series.index(patch) + 1 else: end = q.seriesend(True) if opts.get('last') and not end: ui.write(_("no patches applied\n")) return 1 elif opts.get('last') and end == 1: ui.write(_("only one patch applied\n")) return 1 elif opts.get('last'): start = end - 2 end = 1 else: start = 0 q.qseries(repo, length=end, start=start, status='A', summary=opts.get('summary')) @command("qunapplied", [('1', 'first', None, _('show only the first patch'))] + seriesopts, _('hg qunapplied [-1] [-s] [PATCH]')) def unapplied(ui, repo, patch=None, **opts): """print the patches not yet applied Returns 0 on success.""" q = repo.mq if patch: if patch not in q.series: raise util.Abort(_("patch %s is not in series file") % patch) start = q.series.index(patch) + 1 else: start = q.seriesend(True) if start == len(q.series) and opts.get('first'): ui.write(_("all patches applied\n")) return 1 length = opts.get('first') and 1 or None q.qseries(repo, start=start, length=length, status='U', summary=opts.get('summary')) @command("qimport", [('e', 'existing', None, _('import file in patch directory')), ('n', 'name', '', _('name of patch file'), _('NAME')), ('f', 'force', None, _('overwrite existing files')), ('r', 'rev', [], _('place existing revisions under mq control'), _('REV')), ('g', 'git', None, _('use git extended diff format')), ('P', 'push', None, _('qpush after importing'))], _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... 
[FILE]...')) def qimport(ui, repo, *filename, **opts): """import a patch or existing changeset The patch is inserted into the series after the last applied patch. If no patches have been applied, qimport prepends the patch to the series. The patch will have the same name as its source file unless you give it a new one with -n/--name. You can register an existing patch inside the patch directory with the -e/--existing flag. With -f/--force, an existing patch of the same name will be overwritten. An existing changeset may be placed under mq control with -r/--rev (e.g. qimport --rev tip -n patch will place tip under mq control). With -g/--git, patches imported with --rev will use the git diff format. See the diffs help topic for information on why this is important for preserving rename/copy information and permission changes. Use :hg:`qfinish` to remove changesets from mq control. To import a patch from standard input, pass - as the patch file. When importing from standard input, a patch name must be specified using the --name flag. To import an existing patch while renaming it:: hg qimport -e existing-patch -n new-name Returns 0 if import succeeded. """ lock = repo.lock() # cause this may move phase try: q = repo.mq try: imported = q.qimport( repo, filename, patchname=opts.get('name'), existing=opts.get('existing'), force=opts.get('force'), rev=opts.get('rev'), git=opts.get('git')) finally: q.savedirty() finally: lock.release() if imported and opts.get('push') and not opts.get('rev'): return q.push(repo, imported[-1]) return 0 def qinit(ui, repo, create): """initialize a new queue repository This command also creates a series file for ordering patches, and an mq-specific .hgignore file in the queue repository, to exclude the status and guards files (these contain mostly transient state). 
Returns 0 if initialization succeeded.""" q = repo.mq r = q.init(repo, create) q.savedirty() if r: if not os.path.exists(r.wjoin('.hgignore')): fp = r.wopener('.hgignore', 'w') fp.write('^\\.hg\n') fp.write('^\\.mq\n') fp.write('syntax: glob\n') fp.write('status\n') fp.write('guards\n') fp.close() if not os.path.exists(r.wjoin('series')): r.wopener('series', 'w').close() r[None].add(['.hgignore', 'series']) commands.add(ui, r) return 0 @command("^qinit", [('c', 'create-repo', None, _('create queue repository'))], _('hg qinit [-c]')) def init(ui, repo, **opts): """init a new queue repository (DEPRECATED) The queue repository is unversioned by default. If -c/--create-repo is specified, qinit will create a separate nested repository for patches (qinit -c may also be run later to convert an unversioned patch repository into a versioned one). You can use qcommit to commit changes to this queue repository. This command is deprecated. Without -c, it's implied by other relevant commands. With -c, use :hg:`init --mq` instead.""" return qinit(ui, repo, create=opts.get('create_repo')) @command("qclone", [('', 'pull', None, _('use pull protocol to copy metadata')), ('U', 'noupdate', None, _('do not update the new working directories')), ('', 'uncompressed', None, _('use uncompressed transfer (fast over LAN)')), ('p', 'patches', '', _('location of source patch repository'), _('REPO')), ] + commands.remoteopts, _('hg qclone [OPTION]... SOURCE [DEST]')) def clone(ui, source, dest=None, **opts): '''clone main and patch repository at same time If source is local, destination will have no patches applied. If source is remote, this command can not check if patches are applied in source, so cannot guarantee that patches are not applied in destination. If you clone remote repository, be sure before that it has no patches applied. Source patch repository is looked for in <src>/.hg/patches by default. Use -p <url> to change. 
The patch directory must be a nested Mercurial repository, as would be created by :hg:`init --mq`. Return 0 on success. ''' def patchdir(repo): """compute a patch repo url from a repo object""" url = repo.url() if url.endswith('/'): url = url[:-1] return url + '/.hg/patches' # main repo (destination and sources) if dest is None: dest = hg.defaultdest(source) sr = hg.peer(ui, opts, ui.expandpath(source)) # patches repo (source only) if opts.get('patches'): patchespath = ui.expandpath(opts.get('patches')) else: patchespath = patchdir(sr) try: hg.peer(ui, opts, patchespath) except error.RepoError: raise util.Abort(_('versioned patch repository not found' ' (see init --mq)')) qbase, destrev = None, None if sr.local(): repo = sr.local() if repo.mq.applied and repo[qbase].phase() != phases.secret: qbase = repo.mq.applied[0].node if not hg.islocal(dest): heads = set(repo.heads()) destrev = list(heads.difference(repo.heads(qbase))) destrev.append(repo.changelog.parents(qbase)[0]) elif sr.capable('lookup'): try: qbase = sr.lookup('qbase') except error.RepoError: pass ui.note(_('cloning main repository\n')) sr, dr = hg.clone(ui, opts, sr.url(), dest, pull=opts.get('pull'), rev=destrev, update=False, stream=opts.get('uncompressed')) ui.note(_('cloning patch repository\n')) hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr), pull=opts.get('pull'), update=not opts.get('noupdate'), stream=opts.get('uncompressed')) if dr.local(): repo = dr.local() if qbase: ui.note(_('stripping applied patches from destination ' 'repository\n')) repo.mq.strip(repo, [qbase], update=False, backup=None) if not opts.get('noupdate'): ui.note(_('updating destination repository\n')) hg.update(repo, repo.changelog.tip()) @command("qcommit|qci", commands.table["^commit|ci"][1], _('hg qcommit [OPTION]... 
[FILE]...')) def commit(ui, repo, *pats, **opts): """commit changes in the queue repository (DEPRECATED) This command is deprecated; use :hg:`commit --mq` instead.""" q = repo.mq r = q.qrepo() if not r: raise util.Abort('no queue repository') commands.commit(r.ui, r, *pats, **opts) @command("qseries", [('m', 'missing', None, _('print patches not in series')), ] + seriesopts, _('hg qseries [-ms]')) def series(ui, repo, **opts): """print the entire series file Returns 0 on success.""" repo.mq.qseries(repo, missing=opts.get('missing'), summary=opts.get('summary')) return 0 @command("qtop", seriesopts, _('hg qtop [-s]')) def top(ui, repo, **opts): """print the name of the current patch Returns 0 on success.""" q = repo.mq t = q.applied and q.seriesend(True) or 0 if t: q.qseries(repo, start=t - 1, length=1, status='A', summary=opts.get('summary')) else: ui.write(_("no patches applied\n")) return 1 @command("qnext", seriesopts, _('hg qnext [-s]')) def next(ui, repo, **opts): """print the name of the next pushable patch Returns 0 on success.""" q = repo.mq end = q.seriesend() if end == len(q.series): ui.write(_("all patches applied\n")) return 1 q.qseries(repo, start=end, length=1, summary=opts.get('summary')) @command("qprev", seriesopts, _('hg qprev [-s]')) def prev(ui, repo, **opts): """print the name of the preceding applied patch Returns 0 on success.""" q = repo.mq l = len(q.applied) if l == 1: ui.write(_("only one patch applied\n")) return 1 if not l: ui.write(_("no patches applied\n")) return 1 idx = q.series.index(q.applied[-2].name) q.qseries(repo, start=idx, length=1, status='A', summary=opts.get('summary')) def setupheaderopts(ui, opts): if not opts.get('user') and opts.get('currentuser'): opts['user'] = ui.username() if not opts.get('date') and opts.get('currentdate'): opts['date'] = "%d %d" % util.makedate() @command("^qnew", [('e', 'edit', None, _('edit commit message')), ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')), ('g', 'git', None, 
_('use git extended diff format')), ('U', 'currentuser', None, _('add "From: <current user>" to patch')), ('u', 'user', '', _('add "From: <USER>" to patch'), _('USER')), ('D', 'currentdate', None, _('add "Date: <current date>" to patch')), ('d', 'date', '', _('add "Date: <DATE>" to patch'), _('DATE')) ] + commands.walkopts + commands.commitopts, _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...')) def new(ui, repo, patch, *args, **opts): """create a new patch qnew creates a new patch on top of the currently-applied patch (if any). The patch will be initialized with any outstanding changes in the working directory. You may also use -I/--include, -X/--exclude, and/or a list of files after the patch name to add only changes to matching files to the new patch, leaving the rest as uncommitted modifications. -u/--user and -d/--date can be used to set the (given) user and date, respectively. -U/--currentuser and -D/--currentdate set user to current user and date to current date. -e/--edit, -m/--message or -l/--logfile set the patch header as well as the commit message. If none is specified, the header is empty and the commit message is '[mq]: PATCH'. Use the -g/--git option to keep the patch in the git extended diff format. Read the diffs help topic for more information on why this is important for preserving permission changes and copy/rename information. Returns 0 on successful creation of a new patch. 
""" msg = cmdutil.logmessage(ui, opts) def getmsg(): return ui.edit(msg, opts.get('user') or ui.username()) q = repo.mq opts['msg'] = msg if opts.get('edit'): opts['msg'] = getmsg else: opts['msg'] = msg setupheaderopts(ui, opts) q.new(repo, patch, *args, **opts) q.savedirty() return 0 @command("^qrefresh", [('e', 'edit', None, _('edit commit message')), ('g', 'git', None, _('use git extended diff format')), ('s', 'short', None, _('refresh only files already in the patch and specified files')), ('U', 'currentuser', None, _('add/update author field in patch with current user')), ('u', 'user', '', _('add/update author field in patch with given user'), _('USER')), ('D', 'currentdate', None, _('add/update date field in patch with current date')), ('d', 'date', '', _('add/update date field in patch with given date'), _('DATE')) ] + commands.walkopts + commands.commitopts, _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')) def refresh(ui, repo, *pats, **opts): """update the current patch If any file patterns are provided, the refreshed patch will contain only the modifications that match those patterns; the remaining modifications will remain in the working directory. If -s/--short is specified, files currently included in the patch will be refreshed just like matched files and remain in the patch. If -e/--edit is specified, Mercurial will start your configured editor for you to enter a message. In case qrefresh fails, you will find a backup of your message in ``.hg/last-message.txt``. hg add/remove/copy/rename work as usual, though you might want to use git-style patches (-g/--git or [diff] git=1) to track copies and renames. See the diffs help topic for more information on the git diff format. Returns 0 on success. 
""" q = repo.mq message = cmdutil.logmessage(ui, opts) if opts.get('edit'): if not q.applied: ui.write(_("no patches applied\n")) return 1 if message: raise util.Abort(_('option "-e" incompatible with "-m" or "-l"')) patch = q.applied[-1].name ph = patchheader(q.join(patch), q.plainmode) message = ui.edit('\n'.join(ph.message), ph.user or ui.username()) # We don't want to lose the patch message if qrefresh fails (issue2062) repo.savecommitmessage(message) setupheaderopts(ui, opts) wlock = repo.wlock() try: ret = q.refresh(repo, pats, msg=message, **opts) q.savedirty() return ret finally: wlock.release() @command("^qdiff", commands.diffopts + commands.diffopts2 + commands.walkopts, _('hg qdiff [OPTION]... [FILE]...')) def diff(ui, repo, *pats, **opts): """diff of the current patch and subsequent modifications Shows a diff which includes the current patch as well as any changes which have been made in the working directory since the last refresh (thus showing what the current patch would become after a qrefresh). Use :hg:`diff` if you only want to see the changes made since the last qrefresh, or :hg:`export qtip` if you want to see changes made by the current patch without including changes made since the qrefresh. Returns 0 on success. """ repo.mq.diff(repo, pats, opts) return 0 @command('qfold', [('e', 'edit', None, _('edit patch header')), ('k', 'keep', None, _('keep folded patch files')), ] + commands.commitopts, _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')) def fold(ui, repo, *files, **opts): """fold the named patches into the current patch Patches must not yet be applied. Each patch will be successively applied to the current patch in the order given. If all the patches apply successfully, the current patch will be refreshed with the new cumulative patch, and the folded patches will be deleted. With -k/--keep, the folded patch files will not be removed afterwards. 
The header for each folded patch will be concatenated with the current patch header, separated by a line of ``* * *``. Returns 0 on success.""" q = repo.mq if not files: raise util.Abort(_('qfold requires at least one patch name')) if not q.checktoppatch(repo)[0]: raise util.Abort(_('no patches applied')) q.checklocalchanges(repo) message = cmdutil.logmessage(ui, opts) if opts.get('edit'): if message: raise util.Abort(_('option "-e" incompatible with "-m" or "-l"')) parent = q.lookup('qtip') patches = [] messages = [] for f in files: p = q.lookup(f) if p in patches or p == parent: ui.warn(_('skipping already folded patch %s\n') % p) if q.isapplied(p): raise util.Abort(_('qfold cannot fold already applied patch %s') % p) patches.append(p) for p in patches: if not message: ph = patchheader(q.join(p), q.plainmode) if ph.message: messages.append(ph.message) pf = q.join(p) (patchsuccess, files, fuzz) = q.patch(repo, pf) if not patchsuccess: raise util.Abort(_('error folding patch %s') % p) if not message: ph = patchheader(q.join(parent), q.plainmode) message, user = ph.message, ph.user for msg in messages: message.append('* * *') message.extend(msg) message = '\n'.join(message) if opts.get('edit'): message = ui.edit(message, user or ui.username()) diffopts = q.patchopts(q.diffopts(), *patches) wlock = repo.wlock() try: q.refresh(repo, msg=message, git=diffopts.git) q.delete(repo, patches, opts) q.savedirty() finally: wlock.release() @command("qgoto", [('', 'keep-changes', None, _('tolerate non-conflicting local changes')), ('f', 'force', None, _('overwrite any local changes')), ('', 'no-backup', None, _('do not save backup copies of files'))], _('hg qgoto [OPTION]... 
PATCH')) def goto(ui, repo, patch, **opts): '''push or pop patches until named patch is at top of stack Returns 0 on success.''' opts = fixkeepchangesopts(ui, opts) q = repo.mq patch = q.lookup(patch) nobackup = opts.get('no_backup') keepchanges = opts.get('keep_changes') if q.isapplied(patch): ret = q.pop(repo, patch, force=opts.get('force'), nobackup=nobackup, keepchanges=keepchanges) else: ret = q.push(repo, patch, force=opts.get('force'), nobackup=nobackup, keepchanges=keepchanges) q.savedirty() return ret @command("qguard", [('l', 'list', None, _('list all patches and guards')), ('n', 'none', None, _('drop all guards'))], _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')) def guard(ui, repo, *args, **opts): '''set or print guards for a patch Guards control whether a patch can be pushed. A patch with no guards is always pushed. A patch with a positive guard ("+foo") is pushed only if the :hg:`qselect` command has activated it. A patch with a negative guard ("-foo") is never pushed if the :hg:`qselect` command has activated it. With no arguments, print the currently active guards. With arguments, set guards for the named patch. .. note:: Specifying negative guards now requires '--'. To set guards on another patch:: hg qguard other.patch -- +2.6.17 -stable Returns 0 on success. 
''' def status(idx): guards = q.seriesguards[idx] or ['unguarded'] if q.series[idx] in applied: state = 'applied' elif q.pushable(idx)[0]: state = 'unapplied' else: state = 'guarded' label = 'qguard.patch qguard.%s qseries.%s' % (state, state) ui.write('%s: ' % ui.label(q.series[idx], label)) for i, guard in enumerate(guards): if guard.startswith('+'): ui.write(guard, label='qguard.positive') elif guard.startswith('-'): ui.write(guard, label='qguard.negative') else: ui.write(guard, label='qguard.unguarded') if i != len(guards) - 1: ui.write(' ') ui.write('\n') q = repo.mq applied = set(p.name for p in q.applied) patch = None args = list(args) if opts.get('list'): if args or opts.get('none'): raise util.Abort(_('cannot mix -l/--list with options or ' 'arguments')) for i in xrange(len(q.series)): status(i) return if not args or args[0][0:1] in '-+': if not q.applied: raise util.Abort(_('no patches applied')) patch = q.applied[-1].name if patch is None and args[0][0:1] not in '-+': patch = args.pop(0) if patch is None: raise util.Abort(_('no patch to work with')) if args or opts.get('none'): idx = q.findseries(patch) if idx is None: raise util.Abort(_('no patch named %s') % patch) q.setguards(idx, args) q.savedirty() else: status(q.series.index(q.lookup(patch))) @command("qheader", [], _('hg qheader [PATCH]')) def header(ui, repo, patch=None): """print the header of the topmost or specified patch Returns 0 on success.""" q = repo.mq if patch: patch = q.lookup(patch) else: if not q.applied: ui.write(_('no patches applied\n')) return 1 patch = q.lookup('qtip') ph = patchheader(q.join(patch), q.plainmode) ui.write('\n'.join(ph.message) + '\n') def lastsavename(path): (directory, base) = os.path.split(path) names = os.listdir(directory) namere = re.compile("%s.([0-9]+)" % base) maxindex = None maxname = None for f in names: m = namere.match(f) if m: index = int(m.group(1)) if maxindex is None or index > maxindex: maxindex = index maxname = f if maxname: return 
(os.path.join(directory, maxname), maxindex) return (None, None) def savename(path): (last, index) = lastsavename(path) if last is None: index = 0 newpath = path + ".%d" % (index + 1) return newpath @command("^qpush", [('', 'keep-changes', None, _('tolerate non-conflicting local changes')), ('f', 'force', None, _('apply on top of local changes')), ('e', 'exact', None, _('apply the target patch to its recorded parent')), ('l', 'list', None, _('list patch name in commit text')), ('a', 'all', None, _('apply all patches')), ('m', 'merge', None, _('merge from another queue (DEPRECATED)')), ('n', 'name', '', _('merge queue name (DEPRECATED)'), _('NAME')), ('', 'move', None, _('reorder patch series and apply only the patch')), ('', 'no-backup', None, _('do not save backup copies of files'))], _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]')) def push(ui, repo, patch=None, **opts): """push the next patch onto the stack By default, abort if the working directory contains uncommitted changes. With --keep-changes, abort only if the uncommitted files overlap with patched files. With -f/--force, backup and patch over uncommitted changes. Return 0 on success. 
""" q = repo.mq mergeq = None opts = fixkeepchangesopts(ui, opts) if opts.get('merge'): if opts.get('name'): newpath = repo.join(opts.get('name')) else: newpath, i = lastsavename(q.path) if not newpath: ui.warn(_("no saved queues found, please use -n\n")) return 1 mergeq = queue(ui, repo.baseui, repo.path, newpath) ui.warn(_("merging with queue at: %s\n") % mergeq.path) ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'), mergeq=mergeq, all=opts.get('all'), move=opts.get('move'), exact=opts.get('exact'), nobackup=opts.get('no_backup'), keepchanges=opts.get('keep_changes')) return ret @command("^qpop", [('a', 'all', None, _('pop all patches')), ('n', 'name', '', _('queue name to pop (DEPRECATED)'), _('NAME')), ('', 'keep-changes', None, _('tolerate non-conflicting local changes')), ('f', 'force', None, _('forget any local changes to patched files')), ('', 'no-backup', None, _('do not save backup copies of files'))], _('hg qpop [-a] [-f] [PATCH | INDEX]')) def pop(ui, repo, patch=None, **opts): """pop the current patch off the stack Without argument, pops off the top of the patch stack. If given a patch name, keeps popping off patches until the named patch is at the top of the stack. By default, abort if the working directory contains uncommitted changes. With --keep-changes, abort only if the uncommitted files overlap with patched files. With -f/--force, backup and discard changes made to such files. Return 0 on success. 
""" opts = fixkeepchangesopts(ui, opts) localupdate = True if opts.get('name'): q = queue(ui, repo.baseui, repo.path, repo.join(opts.get('name'))) ui.warn(_('using patch queue: %s\n') % q.path) localupdate = False else: q = repo.mq ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate, all=opts.get('all'), nobackup=opts.get('no_backup'), keepchanges=opts.get('keep_changes')) q.savedirty() return ret @command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]')) def rename(ui, repo, patch, name=None, **opts): """rename a patch With one argument, renames the current patch to PATCH1. With two arguments, renames PATCH1 to PATCH2. Returns 0 on success.""" q = repo.mq if not name: name = patch patch = None if patch: patch = q.lookup(patch) else: if not q.applied: ui.write(_('no patches applied\n')) return patch = q.lookup('qtip') absdest = q.join(name) if os.path.isdir(absdest): name = normname(os.path.join(name, os.path.basename(patch))) absdest = q.join(name) q.checkpatchname(name) ui.note(_('renaming %s to %s\n') % (patch, name)) i = q.findseries(patch) guards = q.guard_re.findall(q.fullseries[i]) q.fullseries[i] = name + ''.join([' #' + g for g in guards]) q.parseseries() q.seriesdirty = True info = q.isapplied(patch) if info: q.applied[info[0]] = statusentry(info[1], name) q.applieddirty = True destdir = os.path.dirname(absdest) if not os.path.isdir(destdir): os.makedirs(destdir) util.rename(q.join(patch), absdest) r = q.qrepo() if r and patch in r.dirstate: wctx = r[None] wlock = r.wlock() try: if r.dirstate[patch] == 'a': r.dirstate.drop(patch) r.dirstate.add(name) else: wctx.copy(patch, name) wctx.forget([patch]) finally: wlock.release() q.savedirty() @command("qrestore", [('d', 'delete', None, _('delete save entry')), ('u', 'update', None, _('update queue working directory'))], _('hg qrestore [-d] [-u] REV')) def restore(ui, repo, rev, **opts): """restore the queue state saved by a revision (DEPRECATED) This command is deprecated, use :hg:`rebase` 
instead.""" rev = repo.lookup(rev) q = repo.mq q.restore(repo, rev, delete=opts.get('delete'), qupdate=opts.get('update')) q.savedirty() return 0 @command("qsave", [('c', 'copy', None, _('copy patch directory')), ('n', 'name', '', _('copy directory name'), _('NAME')), ('e', 'empty', None, _('clear queue status file')), ('f', 'force', None, _('force copy'))] + commands.commitopts, _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')) def save(ui, repo, **opts): """save current queue state (DEPRECATED) This command is deprecated, use :hg:`rebase` instead.""" q = repo.mq message = cmdutil.logmessage(ui, opts) ret = q.save(repo, msg=message) if ret: return ret q.savedirty() # save to .hg/patches before copying if opts.get('copy'): path = q.path if opts.get('name'): newpath = os.path.join(q.basepath, opts.get('name')) if os.path.exists(newpath): if not os.path.isdir(newpath): raise util.Abort(_('destination %s exists and is not ' 'a directory') % newpath) if not opts.get('force'): raise util.Abort(_('destination %s exists, ' 'use -f to force') % newpath) else: newpath = savename(path) ui.warn(_("copy %s to %s\n") % (path, newpath)) util.copyfiles(path, newpath) if opts.get('empty'): del q.applied[:] q.applieddirty = True q.savedirty() return 0 @command("strip", [ ('r', 'rev', [], _('strip specified revision (optional, ' 'can specify revisions without this ' 'option)'), _('REV')), ('f', 'force', None, _('force removal of changesets, discard ' 'uncommitted changes (no backup)')), ('b', 'backup', None, _('bundle only changesets with local revision' ' number greater than REV which are not' ' descendants of REV (DEPRECATED)')), ('', 'no-backup', None, _('no backups')), ('', 'nobackup', None, _('no backups (DEPRECATED)')), ('n', '', None, _('ignored (DEPRECATED)')), ('k', 'keep', None, _("do not modify working copy during strip")), ('B', 'bookmark', '', _("remove revs only reachable from given" " bookmark"))], _('hg strip [-k] [-f] [-n] [-B bookmark] [-r] REV...')) def 
strip(ui, repo, *revs, **opts): """strip changesets and all their descendants from the repository The strip command removes the specified changesets and all their descendants. If the working directory has uncommitted changes, the operation is aborted unless the --force flag is supplied, in which case changes will be discarded. If a parent of the working directory is stripped, then the working directory will automatically be updated to the most recent available ancestor of the stripped parent after the operation completes. Any stripped changesets are stored in ``.hg/strip-backup`` as a bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`, where BUNDLE is the bundle file created by the strip. Note that the local revision numbers will in general be different after the restore. Use the --no-backup option to discard the backup bundle once the operation completes. Strip is not a history-rewriting operation and can be used on changesets in the public phase. But if the stripped changesets have been pushed to a remote repository you will likely pull them again. Return 0 on success. """ backup = 'all' if opts.get('backup'): backup = 'strip' elif opts.get('no_backup') or opts.get('nobackup'): backup = 'none' cl = repo.changelog revs = list(revs) + opts.get('rev') revs = set(scmutil.revrange(repo, revs)) if opts.get('bookmark'): mark = opts.get('bookmark') marks = repo._bookmarks if mark not in marks: raise util.Abort(_("bookmark '%s' not found") % mark) # If the requested bookmark is not the only one pointing to a # a revision we have to only delete the bookmark and not strip # anything. revsets cannot detect that case. 
uniquebm = True for m, n in marks.iteritems(): if m != mark and n == repo[mark].node(): uniquebm = False break if uniquebm: rsrevs = repo.revs("ancestors(bookmark(%s)) - " "ancestors(head() and not bookmark(%s)) - " "ancestors(bookmark() and not bookmark(%s))", mark, mark, mark) revs.update(set(rsrevs)) if not revs: del marks[mark] marks.write() ui.write(_("bookmark '%s' deleted\n") % mark) if not revs: raise util.Abort(_('empty revision set')) descendants = set(cl.descendants(revs)) strippedrevs = revs.union(descendants) roots = revs.difference(descendants) update = False # if one of the wdir parent is stripped we'll need # to update away to an earlier revision for p in repo.dirstate.parents(): if p != nullid and cl.rev(p) in strippedrevs: update = True break rootnodes = set(cl.node(r) for r in roots) q = repo.mq if q.applied: # refresh queue state if we're about to strip # applied patches if cl.rev(repo.lookup('qtip')) in strippedrevs: q.applieddirty = True start = 0 end = len(q.applied) for i, statusentry in enumerate(q.applied): if statusentry.node in rootnodes: # if one of the stripped roots is an applied # patch, only part of the queue is stripped start = i break del q.applied[start:end] q.savedirty() revs = sorted(rootnodes) if update and opts.get('keep'): wlock = repo.wlock() try: urev = repo.mq.qparents(repo, revs[0]) uctx = repo[urev] # only reset the dirstate for files that would actually change # between the working context and uctx descendantrevs = repo.revs("%s::." 
% uctx.rev()) changedfiles = [] for rev in descendantrevs: # blindly reset the files, regardless of what actually changed changedfiles.extend(repo[rev].files()) # reset files that only changed in the dirstate too dirstate = repo.dirstate dirchanges = [f for f in dirstate if dirstate[f] != 'n'] changedfiles.extend(dirchanges) repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles) repo.dirstate.write() update = False finally: wlock.release() if opts.get('bookmark'): del marks[mark] marks.write() ui.write(_("bookmark '%s' deleted\n") % mark) repo.mq.strip(repo, revs, backup=backup, update=update, force=opts.get('force')) return 0 @command("qselect", [('n', 'none', None, _('disable all guards')), ('s', 'series', None, _('list all guards in series file')), ('', 'pop', None, _('pop to before first guarded applied patch')), ('', 'reapply', None, _('pop, then reapply patches'))], _('hg qselect [OPTION]... [GUARD]...')) def select(ui, repo, *args, **opts): '''set or print guarded patches to push Use the :hg:`qguard` command to set or print guards on patch, then use qselect to tell mq which guards to use. A patch will be pushed if it has no guards or any positive guards match the currently selected guard, but will not be pushed if any negative guards match the current guard. For example:: qguard foo.patch -- -stable (negative guard) qguard bar.patch +stable (positive guard) qselect stable This activates the "stable" guard. mq will skip foo.patch (because it has a negative match) but push bar.patch (because it has a positive match). With no arguments, prints the currently active guards. With one argument, sets the active guard. Use -n/--none to deactivate guards (no other arguments needed). When no guards are active, patches with positive guards are skipped and patches with negative guards are pushed. qselect can change the guards on applied patches. It does not pop guarded patches by default. Use --pop to pop back to the last applied patch that is not guarded. 
Use --reapply (which implies --pop) to push back to the current patch afterwards, but skip guarded patches. Use -s/--series to print a list of all guards in the series file (no other arguments needed). Use -v for more information. Returns 0 on success.''' q = repo.mq guards = q.active() if args or opts.get('none'): old_unapplied = q.unapplied(repo) old_guarded = [i for i in xrange(len(q.applied)) if not q.pushable(i)[0]] q.setactive(args) q.savedirty() if not args: ui.status(_('guards deactivated\n')) if not opts.get('pop') and not opts.get('reapply'): unapplied = q.unapplied(repo) guarded = [i for i in xrange(len(q.applied)) if not q.pushable(i)[0]] if len(unapplied) != len(old_unapplied): ui.status(_('number of unguarded, unapplied patches has ' 'changed from %d to %d\n') % (len(old_unapplied), len(unapplied))) if len(guarded) != len(old_guarded): ui.status(_('number of guarded, applied patches has changed ' 'from %d to %d\n') % (len(old_guarded), len(guarded))) elif opts.get('series'): guards = {} noguards = 0 for gs in q.seriesguards: if not gs: noguards += 1 for g in gs: guards.setdefault(g, 0) guards[g] += 1 if ui.verbose: guards['NONE'] = noguards guards = guards.items() guards.sort(key=lambda x: x[0][1:]) if guards: ui.note(_('guards in series file:\n')) for guard, count in guards: ui.note('%2d ' % count) ui.write(guard, '\n') else: ui.note(_('no guards in series file\n')) else: if guards: ui.note(_('active guards:\n')) for g in guards: ui.write(g, '\n') else: ui.write(_('no active guards\n')) reapply = opts.get('reapply') and q.applied and q.appliedname(-1) popped = False if opts.get('pop') or opts.get('reapply'): for i in xrange(len(q.applied)): pushable, reason = q.pushable(i) if not pushable: ui.status(_('popping guarded patches\n')) popped = True if i == 0: q.pop(repo, all=True) else: q.pop(repo, str(i - 1)) break if popped: try: if reapply: ui.status(_('reapplying unguarded patches\n')) q.push(repo, reapply) finally: q.savedirty() @command("qfinish", 
[('a', 'applied', None, _('finish all applied changesets'))], _('hg qfinish [-a] [REV]...')) def finish(ui, repo, *revrange, **opts): """move applied patches into repository history Finishes the specified revisions (corresponding to applied patches) by moving them out of mq control into regular repository history. Accepts a revision range or the -a/--applied option. If --applied is specified, all applied mq revisions are removed from mq control. Otherwise, the given revisions must be at the base of the stack of applied patches. This can be especially useful if your changes have been applied to an upstream repository, or if you are about to push your changes to upstream. Returns 0 on success. """ if not opts.get('applied') and not revrange: raise util.Abort(_('no revisions specified')) elif opts.get('applied'): revrange = ('qbase::qtip',) + revrange q = repo.mq if not q.applied: ui.status(_('no patches applied\n')) return 0 revs = scmutil.revrange(repo, revrange) if repo['.'].rev() in revs and repo[None].files(): ui.warn(_('warning: uncommitted changes in the working directory\n')) # queue.finish may changes phases but leave the responsibility to lock the # repo to the caller to avoid deadlock with wlock. This command code is # responsibility for this locking. lock = repo.lock() try: q.finish(repo, revs) q.savedirty() finally: lock.release() return 0 @command("qqueue", [('l', 'list', False, _('list all available queues')), ('', 'active', False, _('print name of active queue')), ('c', 'create', False, _('create new queue')), ('', 'rename', False, _('rename active queue')), ('', 'delete', False, _('delete reference to queue')), ('', 'purge', False, _('delete queue, and remove patch dir')), ], _('[OPTION] [QUEUE]')) def qqueue(ui, repo, name=None, **opts): '''manage multiple patch queues Supports switching between different patch queues, as well as creating new patch queues and deleting existing ones. 
Omitting a queue name or specifying -l/--list will show you the registered queues - by default the "normal" patches queue is registered. The currently active queue will be marked with "(active)". Specifying --active will print only the name of the active queue. To create a new queue, use -c/--create. The queue is automatically made active, except in the case where there are applied patches from the currently active queue in the repository. Then the queue will only be created and switching will fail. To delete an existing queue, use --delete. You cannot delete the currently active queue. Returns 0 on success. ''' q = repo.mq _defaultqueue = 'patches' _allqueues = 'patches.queues' _activequeue = 'patches.queue' def _getcurrent(): cur = os.path.basename(q.path) if cur.startswith('patches-'): cur = cur[8:] return cur def _noqueues(): try: fh = repo.opener(_allqueues, 'r') fh.close() except IOError: return True return False def _getqueues(): current = _getcurrent() try: fh = repo.opener(_allqueues, 'r') queues = [queue.strip() for queue in fh if queue.strip()] fh.close() if current not in queues: queues.append(current) except IOError: queues = [_defaultqueue] return sorted(queues) def _setactive(name): if q.applied: raise util.Abort(_('new queue created, but cannot make active ' 'as patches are applied')) _setactivenocheck(name) def _setactivenocheck(name): fh = repo.opener(_activequeue, 'w') if name != 'patches': fh.write(name) fh.close() def _addqueue(name): fh = repo.opener(_allqueues, 'a') fh.write('%s\n' % (name,)) fh.close() def _queuedir(name): if name == 'patches': return repo.join('patches') else: return repo.join('patches-' + name) def _validname(name): for n in name: if n in ':\\/.': return False return True def _delete(name): if name not in existing: raise util.Abort(_('cannot delete queue that does not exist')) current = _getcurrent() if name == current: raise util.Abort(_('cannot delete currently active queue')) fh = repo.opener('patches.queues.new', 'w') 
for queue in existing: if queue == name: continue fh.write('%s\n' % (queue,)) fh.close() util.rename(repo.join('patches.queues.new'), repo.join(_allqueues)) if not name or opts.get('list') or opts.get('active'): current = _getcurrent() if opts.get('active'): ui.write('%s\n' % (current,)) return for queue in _getqueues(): ui.write('%s' % (queue,)) if queue == current and not ui.quiet: ui.write(_(' (active)\n')) else: ui.write('\n') return if not _validname(name): raise util.Abort( _('invalid queue name, may not contain the characters ":\\/."')) existing = _getqueues() if opts.get('create'): if name in existing: raise util.Abort(_('queue "%s" already exists') % name) if _noqueues(): _addqueue(_defaultqueue) _addqueue(name) _setactive(name) elif opts.get('rename'): current = _getcurrent() if name == current: raise util.Abort(_('can\'t rename "%s" to its current name') % name) if name in existing: raise util.Abort(_('queue "%s" already exists') % name) olddir = _queuedir(current) newdir = _queuedir(name) if os.path.exists(newdir): raise util.Abort(_('non-queue directory "%s" already exists') % newdir) fh = repo.opener('patches.queues.new', 'w') for queue in existing: if queue == current: fh.write('%s\n' % (name,)) if os.path.exists(olddir): util.rename(olddir, newdir) else: fh.write('%s\n' % (queue,)) fh.close() util.rename(repo.join('patches.queues.new'), repo.join(_allqueues)) _setactivenocheck(name) elif opts.get('delete'): _delete(name) elif opts.get('purge'): if name in existing: _delete(name) qdir = _queuedir(name) if os.path.exists(qdir): shutil.rmtree(qdir) else: if name not in existing: raise util.Abort(_('use --create to create a new queue')) _setactive(name) def mqphasedefaults(repo, roots): """callback used to set mq changeset as secret when no phase data exists""" if repo.mq.applied: if repo.ui.configbool('mq', 'secret', False): mqphase = phases.secret else: mqphase = phases.draft qbase = repo[repo.mq.applied[0].node] roots[mqphase].add(qbase.node()) 
return roots def reposetup(ui, repo): class mqrepo(repo.__class__): @util.propertycache def mq(self): return queue(self.ui, self.baseui, self.path) def abortifwdirpatched(self, errmsg, force=False): if self.mq.applied and not force: parents = self.dirstate.parents() patches = [s.node for s in self.mq.applied] if parents[0] in patches or parents[1] in patches: raise util.Abort(errmsg) def commit(self, text="", user=None, date=None, match=None, force=False, editor=False, extra={}): self.abortifwdirpatched( _('cannot commit over an applied mq patch'), force) return super(mqrepo, self).commit(text, user, date, match, force, editor, extra) def checkpush(self, force, revs): if self.mq.applied and not force: outapplied = [e.node for e in self.mq.applied] if revs: # Assume applied patches have no non-patch descendants and # are not on remote already. Filtering any changeset not # pushed. heads = set(revs) for node in reversed(outapplied): if node in heads: break else: outapplied.pop() # looking for pushed and shared changeset for node in outapplied: if self[node].phase() < phases.secret: raise util.Abort(_('source has mq patches applied')) # no non-secret patches pushed super(mqrepo, self).checkpush(force, revs) def _findtags(self): '''augment tags from base class with patch tags''' result = super(mqrepo, self)._findtags() q = self.mq if not q.applied: return result mqtags = [(patch.node, patch.name) for patch in q.applied] try: # for now ignore filtering business self.unfiltered().changelog.rev(mqtags[-1][0]) except error.LookupError: self.ui.warn(_('mq status file refers to unknown node %s\n') % short(mqtags[-1][0])) return result # do not add fake tags for filtered revisions included = self.changelog.hasnode mqtags = [mqt for mqt in mqtags if included(mqt[0])] if not mqtags: return result mqtags.append((mqtags[-1][0], 'qtip')) mqtags.append((mqtags[0][0], 'qbase')) mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent')) tags = result[0] for patch in mqtags: 
if patch[1] in tags: self.ui.warn(_('tag %s overrides mq patch of the same ' 'name\n') % patch[1]) else: tags[patch[1]] = patch[0] return result if repo.local(): repo.__class__ = mqrepo repo._phasedefaults.append(mqphasedefaults) def mqimport(orig, ui, repo, *args, **kwargs): if (util.safehasattr(repo, 'abortifwdirpatched') and not kwargs.get('no_commit', False)): repo.abortifwdirpatched(_('cannot import over an applied patch'), kwargs.get('force')) return orig(ui, repo, *args, **kwargs) def mqinit(orig, ui, *args, **kwargs): mq = kwargs.pop('mq', None) if not mq: return orig(ui, *args, **kwargs) if args: repopath = args[0] if not hg.islocal(repopath): raise util.Abort(_('only a local queue repository ' 'may be initialized')) else: repopath = cmdutil.findrepo(os.getcwd()) if not repopath: raise util.Abort(_('there is no Mercurial repository here ' '(.hg not found)')) repo = hg.repository(ui, repopath) return qinit(ui, repo, True) def mqcommand(orig, ui, repo, *args, **kwargs): """Add --mq option to operate on patch repository instead of main""" # some commands do not like getting unknown options mq = kwargs.pop('mq', None) if not mq: return orig(ui, repo, *args, **kwargs) q = repo.mq r = q.qrepo() if not r: raise util.Abort(_('no queue repository')) return orig(r.ui, r, *args, **kwargs) def summary(orig, ui, repo, *args, **kwargs): r = orig(ui, repo, *args, **kwargs) q = repo.mq m = [] a, u = len(q.applied), len(q.unapplied(repo)) if a: m.append(ui.label(_("%d applied"), 'qseries.applied') % a) if u: m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u) if m: # i18n: column positioning for "hg summary" ui.write(_("mq: %s\n") % ', '.join(m)) else: # i18n: column positioning for "hg summary" ui.note(_("mq: (empty queue)\n")) return r def revsetmq(repo, subset, x): """``mq()`` Changesets managed by MQ. 
""" revset.getargs(x, 0, 0, _("mq takes no arguments")) applied = set([repo[r.node].rev() for r in repo.mq.applied]) return [r for r in subset if r in applied] # tell hggettext to extract docstrings from these functions: i18nfunctions = [revsetmq] def extsetup(ui): # Ensure mq wrappers are called first, regardless of extension load order by # NOT wrapping in uisetup() and instead deferring to init stage two here. mqopt = [('', 'mq', None, _("operate on patch repository"))] extensions.wrapcommand(commands.table, 'import', mqimport) extensions.wrapcommand(commands.table, 'summary', summary) entry = extensions.wrapcommand(commands.table, 'init', mqinit) entry[1].extend(mqopt) nowrap = set(commands.norepo.split(" ")) def dotable(cmdtable): for cmd in cmdtable.keys(): cmd = cmdutil.parsealiases(cmd)[0] if cmd in nowrap: continue entry = extensions.wrapcommand(cmdtable, cmd, mqcommand) entry[1].extend(mqopt) dotable(commands.table) for extname, extmodule in extensions.extensions(): if extmodule.__file__ != __file__: dotable(getattr(extmodule, 'cmdtable', {})) revset.symbols['mq'] = revsetmq colortable = {'qguard.negative': 'red', 'qguard.positive': 'yellow', 'qguard.unguarded': 'green', 'qseries.applied': 'blue bold underline', 'qseries.guarded': 'black bold', 'qseries.missing': 'red bold', 'qseries.unapplied': 'black bold'} commands.inferrepo += " qnew qrefresh qdiff qcommit"
apache-2.0
pfmooney/dd-agent
checks.d/supervisord.py
33
6841
# stdlib from collections import defaultdict import itertools import re import socket import time import xmlrpclib # 3p import supervisor.xmlrpc # project from checks import AgentCheck DEFAULT_HOST = 'localhost' DEFAULT_PORT = '9001' DEFAULT_SOCKET_IP = 'http://127.0.0.1' DD_STATUS = { 'STOPPED': AgentCheck.CRITICAL, 'STARTING': AgentCheck.UNKNOWN, 'RUNNING': AgentCheck.OK, 'BACKOFF': AgentCheck.CRITICAL, 'STOPPING': AgentCheck.CRITICAL, 'EXITED': AgentCheck.CRITICAL, 'FATAL': AgentCheck.CRITICAL, 'UNKNOWN': AgentCheck.UNKNOWN } PROCESS_STATUS = { AgentCheck.CRITICAL: 'down', AgentCheck.OK: 'up', AgentCheck.UNKNOWN: 'unknown' } SERVER_TAG = 'supervisord_server' PROCESS_TAG = 'supervisord_process' FORMAT_TIME = lambda x: time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(x)) SERVER_SERVICE_CHECK = 'supervisord.can_connect' PROCESS_SERVICE_CHECK = 'supervisord.process.status' class SupervisordCheck(AgentCheck): def check(self, instance): server_name = instance.get('name') if not server_name or not server_name.strip(): raise Exception("Supervisor server name not specified in yaml configuration.") server_service_check_tags = ['%s:%s' % (SERVER_TAG, server_name)] supe = self._connect(instance) count_by_status = defaultdict(int) # Gather all process information try: processes = supe.getAllProcessInfo() except xmlrpclib.Fault, error: raise Exception( 'An error occurred while reading process information: %s %s' % (error.faultCode, error.faultString) ) except socket.error, e: host = instance.get('host', DEFAULT_HOST) port = instance.get('port', DEFAULT_PORT) sock = instance.get('socket') if sock is None: msg = 'Cannot connect to http://%s:%s. ' \ 'Make sure supervisor is running and XML-RPC ' \ 'inet interface is enabled.' % (host, port) else: msg = 'Cannot connect to %s. Make sure sure supervisor ' \ 'is running and socket is enabled and socket file' \ ' has the right permissions.' 
% sock self.service_check(SERVER_SERVICE_CHECK, AgentCheck.CRITICAL, tags=server_service_check_tags, message=msg) raise Exception(msg) except xmlrpclib.ProtocolError, e: if e.errcode == 401: # authorization error msg = 'Username or password to %s are incorrect.' % server_name else: msg = "An error occurred while connecting to %s: "\ "%s %s " % (server_name, e.errcode, e.errmsg) self.service_check(SERVER_SERVICE_CHECK, AgentCheck.CRITICAL, tags=server_service_check_tags, message=msg) raise Exception(msg) # If we're here, we were able to connect to the server self.service_check(SERVER_SERVICE_CHECK, AgentCheck.OK, tags=server_service_check_tags) # Filter monitored processes on configuration directives proc_regex = instance.get('proc_regex', []) if not isinstance(proc_regex, list): raise Exception("Empty or invalid proc_regex.") proc_names = instance.get('proc_names', []) if not isinstance(proc_names, list): raise Exception("Empty or invalid proc_names.") # Collect information on each monitored process monitored_processes = [] # monitor all processes if no filters were specified if len(proc_regex) == 0 and len(proc_names) == 0: monitored_processes = processes for pattern, process in itertools.product(proc_regex, processes): if re.match(pattern, process['name']) and process not in monitored_processes: monitored_processes.append(process) for process in processes: if process['name'] in proc_names and process not in monitored_processes: monitored_processes.append(process) # Report service checks and uptime for each process for proc in monitored_processes: proc_name = proc['name'] tags = ['%s:%s' % (SERVER_TAG, server_name), '%s:%s' % (PROCESS_TAG, proc_name)] # Report Service Check status = DD_STATUS[proc['statename']] msg = self._build_message(proc) count_by_status[status] += 1 self.service_check(PROCESS_SERVICE_CHECK, status, tags=tags, message=msg) # Report Uptime uptime = self._extract_uptime(proc) self.gauge('supervisord.process.uptime', uptime, tags=tags) # Report 
counts by status tags = ['%s:%s' % (SERVER_TAG, server_name)] for status in PROCESS_STATUS: self.gauge('supervisord.process.count', count_by_status[status], tags=tags + ['status:%s' % PROCESS_STATUS[status]]) @staticmethod def _connect(instance): sock = instance.get('socket') if sock is not None: host = instance.get('host', DEFAULT_SOCKET_IP) transport = supervisor.xmlrpc.SupervisorTransport(None, None, sock) server = xmlrpclib.ServerProxy(host, transport=transport) else: host = instance.get('host', DEFAULT_HOST) port = instance.get('port', DEFAULT_PORT) user = instance.get('user') password = instance.get('pass') auth = '%s:%s@' % (user, password) if user and password else '' server = xmlrpclib.Server('http://%s%s:%s/RPC2' % (auth, host, port)) return server.supervisor @staticmethod def _extract_uptime(proc): start, now = int(proc['start']), int(proc['now']) status = proc['statename'] active_state = status in ['BACKOFF', 'RUNNING', 'STOPPING'] return now - start if active_state else 0 @staticmethod def _build_message(proc): start, stop, now = int(proc['start']), int(proc['stop']), int(proc['now']) proc['now_str'] = FORMAT_TIME(now) proc['start_str'] = FORMAT_TIME(start) proc['stop_str'] = '' if stop == 0 else FORMAT_TIME(stop) return """Current time: %(now_str)s Process name: %(name)s Process group: %(group)s Description: %(description)s Error log file: %(stderr_logfile)s Stdout log file: %(stdout_logfile)s Log file: %(logfile)s State: %(statename)s Start time: %(start_str)s Stop time: %(stop_str)s Exit Status: %(exitstatus)s""" % proc
bsd-3-clause
brson/servo
src/components/script/dom/bindings/codegen/parser/tests/test_callback.py
134
1486
import WebIDL def WebIDLTest(parser, harness): parser.parse(""" interface TestCallback { attribute CallbackType? listener; }; callback CallbackType = boolean (unsigned long arg); """) results = parser.finish() harness.ok(True, "TestCallback interface parsed without error.") harness.check(len(results), 2, "Should be one production.") iface = results[0] harness.ok(isinstance(iface, WebIDL.IDLInterface), "Should be an IDLInterface") harness.check(iface.identifier.QName(), "::TestCallback", "Interface has the right QName") harness.check(iface.identifier.name, "TestCallback", "Interface has the right name") harness.check(len(iface.members), 1, "Expect %s members" % 1) attr = iface.members[0] harness.ok(isinstance(attr, WebIDL.IDLAttribute), "Should be an IDLAttribute") harness.ok(attr.isAttr(), "Should be an attribute") harness.ok(not attr.isMethod(), "Attr is not an method") harness.ok(not attr.isConst(), "Attr is not a const") harness.check(attr.identifier.QName(), "::TestCallback::listener", "Attr has the right QName") harness.check(attr.identifier.name, "listener", "Attr has the right name") t = attr.type harness.ok(not isinstance(t, WebIDL.IDLWrapperType), "Attr has the right type") harness.ok(isinstance(t, WebIDL.IDLNullableType), "Attr has the right type") harness.ok(t.isCallback(), "Attr has the right type")
mpl-2.0
newswangerd/ansible
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lag_interfaces/lag_interfaces.py
47
5251
# # -*- coding: utf-8 -*- # Copyright 2019 Red Hat # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """ The vyos lag_interfaces fact class It is in this file the configuration is collected from the device for a given resource, parsed, and the facts tree is populated based on the configuration. """ from __future__ import absolute_import, division, print_function __metaclass__ = type from re import findall, search, M from copy import deepcopy from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( utils, ) from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.lag_interfaces.lag_interfaces import ( Lag_interfacesArgs, ) class Lag_interfacesFacts(object): """ The vyos lag_interfaces fact class """ def __init__(self, module, subspec="config", options="options"): self._module = module self.argument_spec = Lag_interfacesArgs.argument_spec spec = deepcopy(self.argument_spec) if subspec: if options: facts_argument_spec = spec[subspec][options] else: facts_argument_spec = spec[subspec] else: facts_argument_spec = spec self.generated_spec = utils.generate_dict(facts_argument_spec) def populate_facts(self, connection, ansible_facts, data=None): """ Populate the facts for lag_interfaces :param module: the module instance :param connection: the device connection :param data: previously collected conf :rtype: dictionary :returns: facts """ if not data: data = connection.get_config() objs = [] lag_names = findall(r"^set interfaces bonding (\S+)", data, M) if lag_names: for lag in set(lag_names): lag_regex = r" %s .+$" % lag cfg = findall(lag_regex, data, M) obj = self.render_config(cfg) output = connection.run_commands( ["show interfaces bonding " + lag + " slaves"] ) lines = output[0].splitlines() members = [] member = {} if len(lines) > 1: for line in lines[2:]: splitted_line = line.split() if len(splitted_line) > 1: member["member"] = splitted_line[0] members.append(member) 
else: members = [] member = {} obj["name"] = lag.strip("'") if members: obj["members"] = members if obj: objs.append(obj) facts = {} if objs: facts["lag_interfaces"] = [] params = utils.validate_config( self.argument_spec, {"config": objs} ) for cfg in params["config"]: facts["lag_interfaces"].append(utils.remove_empties(cfg)) ansible_facts["ansible_network_resources"].update(facts) return ansible_facts def render_config(self, conf): """ Render config as dictionary structure and delete keys from spec for null values :param spec: The facts tree, generated from the argspec :param conf: The configuration :rtype: dictionary :returns: The generated config """ arp_monitor_conf = "\n".join( filter(lambda x: ("arp-monitor" in x), conf) ) hash_policy_conf = "\n".join( filter(lambda x: ("hash-policy" in x), conf) ) lag_conf = "\n".join(filter(lambda x: ("bond" in x), conf)) config = self.parse_attribs(["mode", "primary"], lag_conf) config["arp_monitor"] = self.parse_arp_monitor(arp_monitor_conf) config["hash_policy"] = self.parse_hash_policy(hash_policy_conf) return utils.remove_empties(config) def parse_attribs(self, attribs, conf): config = {} for item in attribs: value = utils.parse_conf_arg(conf, item) if value: config[item] = value.strip("'") else: config[item] = None return utils.remove_empties(config) def parse_arp_monitor(self, conf): arp_monitor = None if conf: arp_monitor = {} target_list = [] interval = search(r"^.*arp-monitor interval (.+)", conf, M) targets = findall(r"^.*arp-monitor target '(.+)'", conf, M) if targets: for target in targets: target_list.append(target) arp_monitor["target"] = target_list if interval: value = interval.group(1).strip("'") arp_monitor["interval"] = int(value) return arp_monitor def parse_hash_policy(self, conf): hash_policy = None if conf: hash_policy = search(r"^.*hash-policy (.+)", conf, M) hash_policy = hash_policy.group(1).strip("'") return hash_policy
gpl-3.0
robhudson/zamboni
mkt/api/tests/test_serializer.py
3
3068
# -*- coding: utf-8 -*- from decimal import Decimal import json from django.contrib.auth.models import User from django.core.handlers.wsgi import WSGIRequest from django.test import TestCase from django.utils.http import urlencode import mock from nose.tools import eq_, ok_ from rest_framework.serializers import Serializer, ValidationError from simplejson import JSONDecodeError from test_utils import RequestFactory from mkt.api.serializers import PotatoCaptchaSerializer, URLSerializerMixin from mkt.site.fixtures import fixture from mkt.site.tests.test_forms import PotatoCaptchaTestCase class TestPotatoCaptchaSerializer(PotatoCaptchaTestCase): fixtures = fixture('user_999') def test_success_authenticated(self): self.request.user = User.objects.get(id=999) self.request.user.is_authenticated = lambda: True serializer = PotatoCaptchaSerializer(data={}, context=self.context) eq_(serializer.is_valid(), True) def test_success_anonymous(self): data = {'tuber': '', 'sprout': 'potato'} serializer = PotatoCaptchaSerializer(data=data, context=self.context) eq_(serializer.is_valid(), True) def test_no_context(self): data = {'tuber': '', 'sprout': 'potato'} with self.assertRaises(ValidationError): PotatoCaptchaSerializer(data=data) def test_error_anonymous_bad_tuber(self): data = {'tuber': 'HAMMMMMMMMMMMMM', 'sprout': 'potato'} serializer = PotatoCaptchaSerializer(data=data, context=self.context) eq_(serializer.is_valid(), False) def test_error_anonymous_bad_sprout(self): data = {'tuber': 'HAMMMMMMMMMMMMM', 'sprout': ''} serializer = PotatoCaptchaSerializer(data=data, context=self.context) eq_(serializer.is_valid(), False) def test_error_anonymous_bad_tuber_and_sprout(self): serializer = PotatoCaptchaSerializer(data={}, context=self.context) eq_(serializer.is_valid(), False) class TestURLSerializerMixin(TestCase): SerializerClass = type('Potato', (URLSerializerMixin, Serializer), {'Meta': None}) Struct = type('Struct', (object,), {}) url_basename = 'potato' def setUp(self): 
self.SerializerClass.Meta = type('Meta', (self.Struct,), {'model': User, 'url_basename': self.url_basename}) self.request = RequestFactory().get('/') self.request.API_VERSION = 1 self.serializer = self.SerializerClass(context= {'request': self.request}) self.obj = self.Struct() self.obj.pk = 42 @mock.patch('mkt.api.serializers.reverse') def test_get_url(self, mock_reverse): self.serializer.get_url(self.obj) reverse_args, reverse_kwargs = mock_reverse.call_args ok_(mock_reverse.called) eq_(reverse_args[0], '%s-detail' % self.url_basename) eq_(type(reverse_kwargs['request']), WSGIRequest) eq_(reverse_kwargs['kwargs']['pk'], self.obj.pk)
bsd-3-clause
ProjectQ-Framework/ProjectQ
examples/grover.py
1
2364
# -*- coding: utf-8 -*- # pylint: skip-file import math from projectq import MainEngine from projectq.ops import H, Z, X, Measure, All from projectq.meta import Loop, Compute, Uncompute, Control def run_grover(eng, n, oracle): """ Runs Grover's algorithm on n qubit using the provided quantum oracle. Args: eng (MainEngine): Main compiler engine to run Grover on. n (int): Number of bits in the solution. oracle (function): Function accepting the engine, an n-qubit register, and an output qubit which is flipped by the oracle for the correct bit string. Returns: solution (list<int>): Solution bit-string. """ x = eng.allocate_qureg(n) # start in uniform superposition All(H) | x # number of iterations we have to run: num_it = int(math.pi / 4.0 * math.sqrt(1 << n)) # prepare the oracle output qubit (the one that is flipped to indicate the # solution. start in state 1/sqrt(2) * (|0> - |1>) s.t. a bit-flip turns # into a (-1)-phase. oracle_out = eng.allocate_qubit() X | oracle_out H | oracle_out # run num_it iterations with Loop(eng, num_it): # oracle adds a (-1)-phase to the solution oracle(eng, x, oracle_out) # reflection across uniform superposition with Compute(eng): All(H) | x All(X) | x with Control(eng, x[0:-1]): Z | x[-1] Uncompute(eng) All(Measure) | x Measure | oracle_out eng.flush() # return result return [int(qubit) for qubit in x] def alternating_bits_oracle(eng, qubits, output): """ Marks the solution string 1,0,1,0,...,0,1 by flipping the output qubit, conditioned on qubits being equal to the alternating bit-string. Args: eng (MainEngine): Main compiler engine the algorithm is being run on. qubits (Qureg): n-qubit quantum register Grover search is run on. output (Qubit): Output qubit to flip in order to mark the solution. 
""" with Compute(eng): All(X) | qubits[1::2] with Control(eng, qubits): X | output Uncompute(eng) if __name__ == "__main__": eng = MainEngine() # use default compiler engine # run Grover search to find a 7-bit solution print(run_grover(eng, 7, alternating_bits_oracle))
apache-2.0
befelix/GPy
GPy/inference/latent_function_inference/gaussian_grid_inference.py
6
4341
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) # Kurt Cutajar # This implementation of converting GPs to state space models is based on the article: #@article{Gilboa:2015, # title={Scaling multidimensional inference for structured Gaussian processes}, # author={Gilboa, Elad and Saat{\c{c}}i, Yunus and Cunningham, John P}, # journal={Pattern Analysis and Machine Intelligence, IEEE Transactions on}, # volume={37}, # number={2}, # pages={424--436}, # year={2015}, # publisher={IEEE} #} from .grid_posterior import GridPosterior import numpy as np from . import LatentFunctionInference log_2_pi = np.log(2*np.pi) class GaussianGridInference(LatentFunctionInference): """ An object for inference when the likelihood is Gaussian and inputs are on a grid. The function self.inference returns a GridPosterior object, which summarizes the posterior. """ def __init__(self): pass def kron_mvprod(self, A, b): x = b N = 1 D = len(A) G = np.zeros((D), dtype=np.int_) for d in range(0, D): G[d] = len(A[d]) N = np.prod(G) for d in range(D-1, -1, -1): X = np.reshape(x, (G[d], int(np.round(N/G[d]))), order='F') Z = np.dot(A[d], X) Z = Z.T x = np.reshape(Z, (-1, 1), order='F') return x def inference(self, kern, X, likelihood, Y, Y_metadata=None): """ Returns a GridPosterior class containing essential quantities of the posterior """ N = X.shape[0] #number of training points D = X.shape[1] #number of dimensions Kds = np.zeros(D, dtype=object) #vector for holding covariance per dimension Qs = np.zeros(D, dtype=object) #vector for holding eigenvectors of covariance per dimension QTs = np.zeros(D, dtype=object) #vector for holding transposed eigenvectors of covariance per dimension V_kron = 1 # kronecker product of eigenvalues # retrieve the one-dimensional variation of the designated kernel oneDkernel = kern.get_one_dimensional_kernel(D) for d in range(D): xg = list(set(X[:,d])) #extract unique values for a dimension xg = 
np.reshape(xg, (len(xg), 1)) oneDkernel.lengthscale = kern.lengthscale[d] Kds[d] = oneDkernel.K(xg) [V, Q] = np.linalg.eig(Kds[d]) V_kron = np.kron(V_kron, V) Qs[d] = Q QTs[d] = Q.T noise = likelihood.variance + 1e-8 alpha_kron = self.kron_mvprod(QTs, Y) V_kron = V_kron.reshape(-1, 1) alpha_kron = alpha_kron / (V_kron + noise) alpha_kron = self.kron_mvprod(Qs, alpha_kron) log_likelihood = -0.5 * (np.dot(Y.T, alpha_kron) + np.sum((np.log(V_kron + noise))) + N*log_2_pi) # compute derivatives wrt parameters Thete derivs = np.zeros(D+2, dtype='object') for t in range(len(derivs)): dKd_dTheta = np.zeros(D, dtype='object') gamma = np.zeros(D, dtype='object') gam = 1 for d in range(D): xg = list(set(X[:,d])) xg = np.reshape(xg, (len(xg), 1)) oneDkernel.lengthscale = kern.lengthscale[d] if t < D: dKd_dTheta[d] = oneDkernel.dKd_dLen(xg, (t==d), lengthscale=kern.lengthscale[t]) #derivative wrt lengthscale elif (t == D): dKd_dTheta[d] = oneDkernel.dKd_dVar(xg) #derivative wrt variance else: dKd_dTheta[d] = np.identity(len(xg)) #derivative wrt noise gamma[d] = np.diag(np.dot(np.dot(QTs[d], dKd_dTheta[d].T), Qs[d])) gam = np.kron(gam, gamma[d]) gam = gam.reshape(-1,1) kappa = self.kron_mvprod(dKd_dTheta, alpha_kron) derivs[t] = 0.5*np.dot(alpha_kron.T,kappa) - 0.5*np.sum(gam / (V_kron + noise)) # separate derivatives dL_dLen = derivs[:D] dL_dVar = derivs[D] dL_dThetaL = derivs[D+1] return GridPosterior(alpha_kron=alpha_kron, QTs=QTs, Qs=Qs, V_kron=V_kron), \ log_likelihood, {'dL_dLen':dL_dLen, 'dL_dVar':dL_dVar, 'dL_dthetaL':dL_dThetaL}
bsd-3-clause
raddreher/CometVisu
utils/docutils/directives/widget_example.py
3
13088
# -*- coding: utf-8 -*-

# copyright (c) 2010-2016, Christian Mayer and the CometVisu contributers.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA

from docutils import nodes, statemachine
from sphinx.util.nodes import set_source_info
from sphinx.directives.code import container_wrapper
from docutils.parsers.rst import directives, Directive
from docutils.utils.code_analyzer import Lexer, LexerError, NumberLines
from os import path
from lxml import etree

from helper.widget_example_parser import WidgetExampleParser

# One parser instance shared by every directive occurrence in the build.
parser = WidgetExampleParser('manual')


def align(argument):
    """Option validator: restrict ``:align:`` to left/center/right."""
    align_values = ('left', 'center', 'right')
    return directives.choice(argument, align_values)


def editor(argument):
    """Option validator: restrict ``:editor:`` to attributes/elements."""
    align_values = ('attributes', 'elements')
    return directives.choice(argument, align_values)


class WidgetExampleDirective(Directive):
    """
    reStructuredText directive for widget examples. Extracts the example code,
    validates it against the XSD-File provided by CometVisu and creates a file
    with the relevant content the screenshot generation tool needs to create
    the screenshots. Additionally a code block preceded by references to the
    screenshots is added to the rst document.

    .. code-block:: rst

        ..widget-example::
            :hide-source: false
            :number-lines: 1

            <settings design="metal" selector=".widget_container">
                <screenshot name="switch_mapping_styling">
                    <data address="1/4/0">0</data>
                </screenshot>
            </settings>
            <meta>
                <mappings>
                    <mapping name="OnOff">
                        <entry value="0">Aus</entry>
                        <entry value="1">An</entry>
                    </mapping>
                </mappings>
                <stylings>
                    <styling name="RedGreen">
                        <entry value="1">red</entry>
                        <entry value="0">green</entry>
                    </styling>
                </stylings>
            </meta>
            <switch on_value="1" off_value="0" mapping="OnOff" styling="RedGreen">
                <label>Kanal 1</label>
                <address transform="DPT:1.001" mode="readwrite">1/1/0</address>
                <address transform="DPT:1.001" mode="read">1/4/0</address>
            </switch>

    @author Tobias Bräutigam
    @since 0.10.0
    """
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {
        'linenos': directives.flag,
        'lineno-start': int,
        'scale': int,  # scale screenshot in percent
        'hide-source': directives.unchanged,  # true or false
        'editor': editor,
        'align': align
    }
    has_content = True

    def add_caption(self, caption_string, node):
        """Parse *caption_string* as rst and append it as a caption to *node*."""
        cnode = nodes.Element()  # anonymous container for parsing
        sl = statemachine.StringList([caption_string], source='')
        self.state.nested_parse(sl, self.content_offset, cnode)
        caption = nodes.caption(caption_string, '', *cnode)
        if 'align' in self.options:
            caption['align'] = self.options['align']
        else:
            caption['align'] = 'center'
        node += caption

    def run(self):
        """Build the figure/literal-block nodes and write screenshot control files."""
        config = None  # NOTE(review): unused in the live code path
        show_source = True
        # Local 'editor' intentionally shadows the module-level validator here.
        editor = self.options['editor'] if 'editor' in self.options else None
        self.assert_has_content()
        source = "\n".join(self.content)
        # Derive a unique name and the screenshot target dir from the rst file path.
        source_path = self.state_machine.document.settings._source.split("doc/manual/", 1)[1]
        screenshot_dir = path.join("doc", "manual", path.dirname(self.state_machine.document.settings._source).split("doc/manual/", 1)[1], "_static")
        parser.set_screenshot_dir(screenshot_dir)
        name = source_path[:-4].replace("/", "_")

        # All XML extraction/validation is delegated to WidgetExampleParser.
        # (A large block of superseded hand-rolled parsing code was removed here.)
        parse_result = parser.parse(source, name)

        if 'scale' in self.options:
            # Clamp the requested scale to the 1..100 percent range.
            scale = max(1, min(100, int(self.options['scale'] or 100)))
            parse_result['settings']['scale'] = scale

        if editor is not None:
            # change screenshot + selector: shoot the editor tree for this
            # widget instead of the rendered widget itself.
            parse_result['settings']['editor'] = editor
            parse_result['settings']['widget'] = parse_result['example_tag']
            if editor == "attributes":
                parse_result['settings']['selector'] = ".treeType_%s ul.attributes" % parse_result['example_tag']
            elif editor == "elements":
                parse_result['settings']['selector'] = ".treeType_%s" % parse_result['example_tag']

            parse_result['settings']['screenshots'].append({
                "name": "%s_editor_%s" % (name, editor),
                "data": {}
            })
            show_source = False

        try:
            parser.save_screenshot_control_files(parse_result, name, editor=editor is not None)
        except etree.XMLSyntaxError as e:
            raise self.error(str(e))

        # create the code-block
        classes = ['code', 'xml']  # NOTE(review): unused in the live code path

        # set up lexical analyzer (validates highlightability; NOTE(review):
        # 'tokens' is not consumed afterwards)
        try:
            tokens = Lexer(parse_result['display_content'], 'xml',
                           self.state.document.settings.syntax_highlight)
        except LexerError as error:
            raise self.warning(error)

        if 'number-lines' in self.options:
            # optional argument `startline`, defaults to 1
            try:
                startline = int(self.options['number-lines'] or 1)
            except ValueError:
                raise self.error(':number-lines: with non-integer start value')
            endline = startline + len(self.content)
            # add linenumber filter:
            tokens = NumberLines(tokens, startline, endline)

        if 'hide-source' in self.options and show_source:
            show_source = self.options['hide-source'] != "true"

        res_nodes = []
        # One figure node per screenshot defined in the example settings.
        for shot in parse_result['settings']['screenshots']:
            reference = "_static/%s.png" % shot['name']
            options = dict(uri=reference)
            if 'caption' in shot:
                options['alt'] = shot['caption']
            image_node = nodes.image(rawsource=shot['name'], **options)
            figure_node = nodes.figure('', image_node)
            if 'align' in self.options:
                figure_node['align'] = self.options['align']
            if 'caption' in shot:
                self.add_caption(shot['caption'], figure_node)
            elif not show_source and parse_result['global_caption'] and len(parse_result['settings']['screenshots']) == 1:
                # With a hidden source the global caption goes to the single figure.
                self.add_caption(parse_result['global_caption'], figure_node)
            res_nodes.append(figure_node)

        if show_source:
            # Append the example XML itself as a literal block.
            example_content = parse_result['display_content'].decode('utf-8')
            node = nodes.literal_block(example_content, example_content)
            node['language'] = 'xml'
            node['linenos'] = 'linenos' in self.options or \
                              'lineno-start' in self.options
            node['classes'] += self.options.get('class', [])
            set_source_info(self, node)
            if parse_result['global_caption']:
                self.options.setdefault('name',
                                        nodes.fully_normalize_name(parse_result['global_caption']))
                node = container_wrapper(self, node, parse_result['global_caption'])
                self.add_name(node)
            res_nodes.append(node)
        return res_nodes
gpl-3.0
linsicai/or-tools
examples/python/safe_cracking.py
5
2681
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Safe cracking puzzle in Google CP Solver.

From the Oz Primer:
http://www.comp.nus.edu.sg/~henz/projects/puzzles/digits/index.html
'''
The code of Professor Smart's safe is a sequence of 9 distinct
nonzero digits C1 .. C9 such that the following equations and
inequations are satisfied:

      C4 - C6   =   C7
 C1 * C2 * C3   =   C8 + C9
 C2 + C3 + C6   <   C8
           C9   <   C8

and

 C1 <> 1, C2 <> 2, ..., C9 <> 9

can you find the correct combination?
'''

Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/safe_cracking.mzn
* ECLiPSe : http://www.hakank.org/eclipse/safe_cracking.ecl
* SICStus : http://www.hakank.org/sicstus/safe_cracking.pl
* Gecode: http://hakank.org/gecode/safe_cracking.cpp

This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function

from ortools.constraint_solver import pywrapcp


def main():
    """Build the safe-cracking CP model, enumerate and print all solutions."""
    solver = pywrapcp.Solver('Safe cracking puzzle')

    # --- data ------------------------------------------------------------
    n = 9
    domain = list(range(1, n + 1))  # each code digit is a distinct 1..9

    # --- decision variables ----------------------------------------------
    # One variable per code position C1..C9.
    code = [solver.IntVar(domain, 'LD[%i]' % pos) for pos in range(n)]
    C1, C2, C3, C4, C5, C6, C7, C8, C9 = code

    # --- constraints ------------------------------------------------------
    solver.Add(solver.AllDifferent(code))

    solver.Add(C4 - C6 == C7)
    solver.Add(C1 * C2 * C3 == C8 + C9)
    solver.Add(C2 + C3 + C6 < C8)
    solver.Add(C9 < C8)
    # C_i must differ from its own position (C1 <> 1, ..., C9 <> 9).
    for pos, var in enumerate(code):
        solver.Add(var != pos + 1)

    # --- search and result ------------------------------------------------
    db = solver.Phase(code, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT)
    solver.NewSearch(db)

    solution_count = 0
    while solver.NextSolution():
        solution_count += 1
        print('LD:', [var.Value() for var in code])

    solver.EndSearch()

    print()
    print('num_solutions:', solution_count)
    print('failures:', solver.Failures())
    print('branches:', solver.Branches())
    print('WallTime:', solver.WallTime(), 'ms')


if __name__ == '__main__':
    main()
apache-2.0
MicroWorldwide/namebench
nb_third_party/dns/version.py
215
1267
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """dnspython release version information.""" MAJOR = 1 MINOR = 8 MICRO = 0 RELEASELEVEL = 0x0f SERIAL = 0 if RELEASELEVEL == 0x0f: version = '%d.%d.%d' % (MAJOR, MINOR, MICRO) elif RELEASELEVEL == 0x00: version = '%d.%d.%dx%d' % \ (MAJOR, MINOR, MICRO, SERIAL) else: version = '%d.%d.%d%x%d' % \ (MAJOR, MINOR, MICRO, RELEASELEVEL, SERIAL) hexversion = MAJOR << 24 | MINOR << 16 | MICRO << 8 | RELEASELEVEL << 4 | \ SERIAL
apache-2.0
veridiam/Madcow-Waaltz
madcow/include/chardet/constants.py
237
1484
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### _debug = 0 eDetecting = 0 eFoundIt = 1 eNotMe = 2 eStart = 0 eError = 1 eItsMe = 2 SHORTCUT_THRESHOLD = 0.95 import __builtin__ if not hasattr(__builtin__, 'False'): False = 0 True = 1 else: False = __builtin__.False True = __builtin__.True
gpl-3.0
heeraj123/oh-mainline
vendor/packages/python-social-auth/social/backends/behance.py
70
1581
""" Behance OAuth2 backend, docs at: http://psa.matiasaguirre.net/docs/backends/behance.html """ from social.backends.oauth import BaseOAuth2 class BehanceOAuth2(BaseOAuth2): """Behance OAuth authentication backend""" name = 'behance' AUTHORIZATION_URL = 'https://www.behance.net/v2/oauth/authenticate' ACCESS_TOKEN_URL = 'https://www.behance.net/v2/oauth/token' ACCESS_TOKEN_METHOD = 'POST' SCOPE_SEPARATOR = '|' EXTRA_DATA = [('username', 'username')] REDIRECT_STATE = False def get_user_id(self, details, response): return response['user']['id'] def get_user_details(self, response): """Return user details from Behance account""" user = response['user'] fullname, first_name, last_name = self.get_user_names( user['display_name'], user['first_name'], user['last_name'] ) return {'username': user['username'], 'fullname': fullname, 'first_name': first_name, 'last_name': last_name, 'email': ''} def extra_data(self, user, uid, response, details=None, *args, **kwargs): # Pull up the embedded user attributes so they can be found as extra # data. See the example token response for possible attributes: # http://www.behance.net/dev/authentication#step-by-step data = response.copy() data.update(response['user']) return super(BehanceOAuth2, self).extra_data(user, uid, data, details, *args, **kwargs)
agpl-3.0
jmartinezchaine/OpenERP
openerp/addons/hr_attendance/wizard/hr_attendance_bymonth.py
9
2081
# -*- coding: utf-8 -*- ############################################################################### # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from osv import osv, fields class hr_attendance_bymonth(osv.osv_memory): _name = 'hr.attendance.month' _description = 'Print Monthly Attendance Report' _columns = { 'month': fields.selection([(1, 'January'), (2, 'February'), (3, 'March'), (4, 'April'), (5, 'May'), (6, 'June'), (7, 'July'), (8, 'August'), (9, 'September'), (10, 'October'), (11, 'November'), (12, 'December')], 'Month', required=True), 'year': fields.integer('Year', required=True) } _defaults = { 'month': lambda *a: time.gmtime()[1], 'year': lambda *a: time.gmtime()[0], } def print_report(self, cr, uid, ids, context=None): datas = { 'ids': [], 'active_ids': context['active_ids'], 'model': 'hr.employee', 'form': self.read(cr, uid, ids)[0] } return { 'type': 'ir.actions.report.xml', 'report_name': 'hr.attendance.bymonth', 'datas': datas, } hr_attendance_bymonth() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
duhzecca/cinder
cinder/api/views/versions.py
22
3392
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import copy
import os

from oslo_config import cfg


versions_opts = [
    cfg.StrOpt('public_endpoint',
               default=None,
               help="Public url to use for versions endpoint. The default "
                    "is None, which will use the request's host_url "
                    "attribute to populate the URL base. If Cinder is "
                    "operating behind a proxy, you will want to change "
                    "this to represent the proxy's URL."),
]

CONF = cfg.CONF
CONF.register_opts(versions_opts)


def get_view_builder(req):
    """Create a ViewBuilder rooted at the configured or request URL."""
    base_url = CONF.public_endpoint or req.application_url
    return ViewBuilder(base_url)


class ViewBuilder(object):
    def __init__(self, base_url):
        """Initialize ViewBuilder.

        :param base_url: url of the root wsgi application
        """
        self.base_url = base_url

    def build_choices(self, VERSIONS, req):
        """Build the 300 Multiple Choices body listing every API version."""
        choices = [
            {
                "id": VERSIONS[key]['id'],
                "status": VERSIONS[key]['status'],
                "links": [
                    {"rel": "self",
                     "href": self.generate_href(VERSIONS[key]['id'],
                                                req.path)},
                ],
                "media-types": VERSIONS[key]['media-types'],
            }
            for key in VERSIONS
        ]
        return dict(choices=choices)

    def build_versions(self, versions):
        """Build the version index, ordered by version key."""
        listing = [
            {
                "id": versions[key]['id'],
                "status": versions[key]['status'],
                "updated": versions[key]['updated'],
                "links": self._build_links(versions[key]),
            }
            for key in sorted(versions.keys())
        ]
        return dict(versions=listing)

    def build_version(self, version):
        """Build a single-version body with a self link prepended."""
        shown = copy.deepcopy(version)
        self_link = {
            "rel": "self",
            "href": self.base_url.rstrip('/') + '/',
        }
        shown['links'].insert(0, self_link)
        return dict(version=shown)

    def _build_links(self, version_data):
        """Generate a container of links that refer to the provided version."""
        href = self.generate_href(version_data['id'])
        return [{'rel': 'self', 'href': href}]

    def generate_href(self, version, path=None):
        """Create an url that refers to a specific version_number."""
        # Anything that is not a v1.x id maps onto the v2 endpoint.
        version_number = 'v1' if version.startswith('v1.') else 'v2'
        if path:
            return os.path.join(self.base_url, version_number, path.strip('/'))
        return os.path.join(self.base_url, version_number) + '/'
apache-2.0
alexallah/django
tests/csrf_tests/tests.py
37
30653
import logging import re from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.http import HttpRequest from django.middleware.csrf import ( CSRF_SESSION_KEY, CSRF_TOKEN_LENGTH, REASON_BAD_TOKEN, REASON_NO_CSRF_COOKIE, CsrfViewMiddleware, _compare_salted_tokens as equivalent_tokens, get_token, ) from django.test import SimpleTestCase, override_settings from django.test.utils import patch_logger from django.views.decorators.csrf import csrf_exempt, requires_csrf_token from .views import ( ensure_csrf_cookie_view, non_token_view_using_request_processor, post_form_view, token_view, ) class TestingHttpRequest(HttpRequest): """ A version of HttpRequest that allows us to change some things more easily """ def __init__(self): super().__init__() # A real session backend isn't needed. self.session = {} def is_secure(self): return getattr(self, '_is_secure_override', False) class CsrfViewMiddlewareTestMixin: """ Shared methods and tests for session-based and cookie-based tokens. 
""" _csrf_id = _csrf_id_cookie = '1bcdefghij2bcdefghij3bcdefghij4bcdefghij5bcdefghij6bcdefghijABCD' def _get_GET_no_csrf_cookie_request(self): return TestingHttpRequest() def _get_GET_csrf_cookie_request(self): raise NotImplementedError('This method must be implemented by a subclass.') def _get_POST_csrf_cookie_request(self): req = self._get_GET_csrf_cookie_request() req.method = "POST" return req def _get_POST_no_csrf_cookie_request(self): req = self._get_GET_no_csrf_cookie_request() req.method = "POST" return req def _get_POST_request_with_token(self): req = self._get_POST_csrf_cookie_request() req.POST['csrfmiddlewaretoken'] = self._csrf_id return req def _check_token_present(self, response, csrf_id=None): text = str(response.content, response.charset) match = re.search("name='csrfmiddlewaretoken' value='(.*?)'", text) csrf_token = csrf_id or self._csrf_id self.assertTrue( match and equivalent_tokens(csrf_token, match.group(1)), "Could not find csrfmiddlewaretoken to match %s" % csrf_token ) def test_process_response_get_token_not_used(self): """ If get_token() is not called, the view middleware does not add a cookie. """ # This is important to make pages cacheable. Pages which do call # get_token(), assuming they use the token, are not cacheable because # the token is specific to the user req = self._get_GET_no_csrf_cookie_request() # non_token_view_using_request_processor does not call get_token(), but # does use the csrf request processor. By using this, we are testing # that the view processor is properly lazy and doesn't call get_token() # until needed. 
CsrfViewMiddleware().process_view(req, non_token_view_using_request_processor, (), {}) resp = non_token_view_using_request_processor(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False) self.assertIs(csrf_cookie, False) # Check the request processing def test_process_request_no_csrf_cookie(self): """ If no CSRF cookies is present, the middleware rejects the incoming request. This will stop login CSRF. """ with patch_logger('django.security.csrf', 'warning') as logger_calls: req = self._get_POST_no_csrf_cookie_request() req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) self.assertEqual(logger_calls[0], 'Forbidden (%s): ' % REASON_NO_CSRF_COOKIE) def test_process_request_csrf_cookie_no_token(self): """ If a CSRF cookie is present but no token, the middleware rejects the incoming request. """ with patch_logger('django.security.csrf', 'warning') as logger_calls: req = self._get_POST_csrf_cookie_request() req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) self.assertEqual(logger_calls[0], 'Forbidden (%s): ' % REASON_BAD_TOKEN) def test_process_request_csrf_cookie_and_token(self): """ If both a cookie and a token is present, the middleware lets it through. """ req = self._get_POST_request_with_token() req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) def test_process_request_csrf_cookie_no_token_exempt_view(self): """ If a CSRF cookie is present and no token, but the csrf_exempt decorator has been applied to the view, the middleware lets it through """ req = self._get_POST_csrf_cookie_request() req2 = CsrfViewMiddleware().process_view(req, csrf_exempt(post_form_view), (), {}) self.assertIsNone(req2) def test_csrf_token_in_header(self): """ The token may be passed in a header instead of in the form. 
""" req = self._get_POST_csrf_cookie_request() req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) @override_settings(CSRF_HEADER_NAME='HTTP_X_CSRFTOKEN_CUSTOMIZED') def test_csrf_token_in_header_with_customized_name(self): """ settings.CSRF_HEADER_NAME can be used to customize the CSRF header name """ req = self._get_POST_csrf_cookie_request() req.META['HTTP_X_CSRFTOKEN_CUSTOMIZED'] = self._csrf_id req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) def test_put_and_delete_rejected(self): """ HTTP PUT and DELETE methods have protection """ req = TestingHttpRequest() req.method = 'PUT' with patch_logger('django.security.csrf', 'warning') as logger_calls: req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) self.assertEqual(logger_calls[0], 'Forbidden (%s): ' % REASON_NO_CSRF_COOKIE) req = TestingHttpRequest() req.method = 'DELETE' with patch_logger('django.security.csrf', 'warning') as logger_calls: req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) self.assertEqual(logger_calls[0], 'Forbidden (%s): ' % REASON_NO_CSRF_COOKIE) def test_put_and_delete_allowed(self): """ HTTP PUT and DELETE can get through with X-CSRFToken and a cookie. """ req = self._get_GET_csrf_cookie_request() req.method = 'PUT' req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) req = self._get_GET_csrf_cookie_request() req.method = 'DELETE' req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) # Tests for the template tag method def test_token_node_no_csrf_cookie(self): """ CsrfTokenNode works when no CSRF cookie is set. 
""" req = self._get_GET_no_csrf_cookie_request() resp = token_view(req) token = get_token(req) self.assertIsNotNone(token) self._check_token_present(resp, token) def test_token_node_empty_csrf_cookie(self): """ A new token is sent if the csrf_cookie is the empty string. """ req = self._get_GET_no_csrf_cookie_request() req.COOKIES[settings.CSRF_COOKIE_NAME] = "" CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) token = get_token(req) self.assertIsNotNone(token) self._check_token_present(resp, token) def test_token_node_with_csrf_cookie(self): """ CsrfTokenNode works when a CSRF cookie is set. """ req = self._get_GET_csrf_cookie_request() CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) self._check_token_present(resp) def test_get_token_for_exempt_view(self): """ get_token still works for a view decorated with 'csrf_exempt'. """ req = self._get_GET_csrf_cookie_request() CsrfViewMiddleware().process_view(req, csrf_exempt(token_view), (), {}) resp = token_view(req) self._check_token_present(resp) def test_get_token_for_requires_csrf_token_view(self): """ get_token() works for a view decorated solely with requires_csrf_token. """ req = self._get_GET_csrf_cookie_request() resp = requires_csrf_token(token_view)(req) self._check_token_present(resp) def test_token_node_with_new_csrf_cookie(self): """ CsrfTokenNode works when a CSRF cookie is created by the middleware (when one was not already present) """ req = self._get_GET_no_csrf_cookie_request() CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies[settings.CSRF_COOKIE_NAME] self._check_token_present(resp, csrf_id=csrf_cookie.value) def test_cookie_not_reset_on_accepted_request(self): """ The csrf token used in posts is changed on every request (although stays equivalent). The csrf cookie should not change on accepted requests. 
If it appears in the response, it should keep its value. """ req = self._get_POST_request_with_token() CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp.cookies.get(settings.CSRF_COOKIE_NAME, None) if csrf_cookie: self.assertEqual( csrf_cookie.value, self._csrf_id_cookie, "CSRF cookie was changed on an accepted request" ) @override_settings(DEBUG=True, ALLOWED_HOSTS=['www.example.com']) def test_https_bad_referer(self): """ A POST HTTPS request with a bad referer is rejected """ req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://www.evil.org/somepage' req.META['SERVER_PORT'] = '443' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertContains( response, 'Referer checking failed - https://www.evil.org/somepage does not ' 'match any trusted origins.', status_code=403, ) @override_settings(DEBUG=True) def test_https_malformed_referer(self): """ A POST HTTPS request with a bad referer is rejected. """ malformed_referer_msg = 'Referer checking failed - Referer is malformed.' 
req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_REFERER'] = 'http://http://www.example.com/' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertContains( response, 'Referer checking failed - Referer is insecure while host is secure.', status_code=403, ) # Empty req.META['HTTP_REFERER'] = '' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # Non-ASCII req.META['HTTP_REFERER'] = 'ØBöIß' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # missing scheme # >>> urlparse('//example.com/') # ParseResult(scheme='', netloc='example.com', path='/', params='', query='', fragment='') req.META['HTTP_REFERER'] = '//example.com/' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # missing netloc # >>> urlparse('https://') # ParseResult(scheme='https', netloc='', path='', params='', query='', fragment='') req.META['HTTP_REFERER'] = 'https://' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) @override_settings(ALLOWED_HOSTS=['www.example.com']) def test_https_good_referer(self): """ A POST HTTPS request with a good referer is accepted. """ req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://www.example.com/somepage' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) @override_settings(ALLOWED_HOSTS=['www.example.com']) def test_https_good_referer_2(self): """ A POST HTTPS request with a good referer is accepted where the referer contains no trailing slash. 
""" # See ticket #15617 req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://www.example.com' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) def _test_https_good_referer_behind_proxy(self): req = self._get_POST_request_with_token() req._is_secure_override = True req.META.update({ 'HTTP_HOST': '10.0.0.2', 'HTTP_REFERER': 'https://www.example.com/somepage', 'SERVER_PORT': '8080', 'HTTP_X_FORWARDED_HOST': 'www.example.com', 'HTTP_X_FORWARDED_PORT': '443', }) req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) @override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_TRUSTED_ORIGINS=['dashboard.example.com']) def test_https_csrf_trusted_origin_allowed(self): """ A POST HTTPS request with a referer added to the CSRF_TRUSTED_ORIGINS setting is accepted. """ req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://dashboard.example.com' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) @override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_TRUSTED_ORIGINS=['.example.com']) def test_https_csrf_wildcard_trusted_origin_allowed(self): """ A POST HTTPS request with a referer that matches a CSRF_TRUSTED_ORIGINS wildcard is accepted. 
""" req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://dashboard.example.com' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(response) def _test_https_good_referer_matches_cookie_domain(self): req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_REFERER'] = 'https://foo.example.com/' req.META['SERVER_PORT'] = '443' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(response) def _test_https_good_referer_matches_cookie_domain_with_different_port(self): req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://foo.example.com:4443/' req.META['SERVER_PORT'] = '4443' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(response) def test_ensures_csrf_cookie_no_logging(self): """ ensure_csrf_cookie() doesn't log warnings (#19436). """ class TestHandler(logging.Handler): def emit(self, record): raise Exception("This shouldn't have happened!") logger = logging.getLogger('django.request') test_handler = TestHandler() old_log_level = logger.level try: logger.addHandler(test_handler) logger.setLevel(logging.WARNING) req = self._get_GET_no_csrf_cookie_request() ensure_csrf_cookie_view(req) finally: logger.removeHandler(test_handler) logger.setLevel(old_log_level) def test_post_data_read_failure(self): """ #20128 -- IOErrors during POST data reading should be caught and treated as if the POST data wasn't there. 
""" class CsrfPostRequest(HttpRequest): """ HttpRequest that can raise an IOError when accessing POST data """ def __init__(self, token, raise_error): super().__init__() self.method = 'POST' self.raise_error = False self.COOKIES[settings.CSRF_COOKIE_NAME] = token # Handle both cases here to prevent duplicate code in the # session tests. self.session = {} self.session[CSRF_SESSION_KEY] = token self.POST['csrfmiddlewaretoken'] = token self.raise_error = raise_error def _load_post_and_files(self): raise IOError('error reading input data') def _get_post(self): if self.raise_error: self._load_post_and_files() return self._post def _set_post(self, post): self._post = post POST = property(_get_post, _set_post) token = ('ABC' + self._csrf_id)[:CSRF_TOKEN_LENGTH] req = CsrfPostRequest(token, raise_error=False) resp = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(resp) req = CsrfPostRequest(token, raise_error=True) with patch_logger('django.security.csrf', 'warning') as logger_calls: resp = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(resp.status_code, 403) self.assertEqual(logger_calls[0], 'Forbidden (%s): ' % REASON_BAD_TOKEN) class CsrfViewMiddlewareTests(CsrfViewMiddlewareTestMixin, SimpleTestCase): def _get_GET_csrf_cookie_request(self): req = TestingHttpRequest() req.COOKIES[settings.CSRF_COOKIE_NAME] = self._csrf_id_cookie return req def _get_POST_bare_secret_csrf_cookie_request(self): req = self._get_POST_no_csrf_cookie_request() req.COOKIES[settings.CSRF_COOKIE_NAME] = self._csrf_id_cookie[:32] return req def _get_POST_bare_secret_csrf_cookie_request_with_token(self): req = self._get_POST_bare_secret_csrf_cookie_request() req.POST['csrfmiddlewaretoken'] = self._csrf_id_cookie[:32] return req def test_ensures_csrf_cookie_no_middleware(self): """ The ensure_csrf_cookie() decorator works without middleware. 
""" req = self._get_GET_no_csrf_cookie_request() resp = ensure_csrf_cookie_view(req) self.assertTrue(resp.cookies.get(settings.CSRF_COOKIE_NAME, False)) self.assertIn('Cookie', resp.get('Vary', '')) def test_ensures_csrf_cookie_with_middleware(self): """ The ensure_csrf_cookie() decorator works with the CsrfViewMiddleware enabled. """ req = self._get_GET_no_csrf_cookie_request() CsrfViewMiddleware().process_view(req, ensure_csrf_cookie_view, (), {}) resp = ensure_csrf_cookie_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) self.assertTrue(resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)) self.assertIn('Cookie', resp2.get('Vary', '')) def test_csrf_cookie_age(self): """ CSRF cookie age can be set using settings.CSRF_COOKIE_AGE. """ req = self._get_GET_no_csrf_cookie_request() MAX_AGE = 123 with self.settings(CSRF_COOKIE_NAME='csrfcookie', CSRF_COOKIE_DOMAIN='.example.com', CSRF_COOKIE_AGE=MAX_AGE, CSRF_COOKIE_PATH='/test/', CSRF_COOKIE_SECURE=True, CSRF_COOKIE_HTTPONLY=True): # token_view calls get_token() indirectly CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) max_age = resp2.cookies.get('csrfcookie').get('max-age') self.assertEqual(max_age, MAX_AGE) def test_csrf_cookie_age_none(self): """ CSRF cookie age does not have max age set and therefore uses session-based cookies. 
""" req = self._get_GET_no_csrf_cookie_request() MAX_AGE = None with self.settings(CSRF_COOKIE_NAME='csrfcookie', CSRF_COOKIE_DOMAIN='.example.com', CSRF_COOKIE_AGE=MAX_AGE, CSRF_COOKIE_PATH='/test/', CSRF_COOKIE_SECURE=True, CSRF_COOKIE_HTTPONLY=True): # token_view calls get_token() indirectly CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) max_age = resp2.cookies.get('csrfcookie').get('max-age') self.assertEqual(max_age, '') def test_process_view_token_too_long(self): """ If the token is longer than expected, it is ignored and a new token is created. """ req = self._get_GET_no_csrf_cookie_request() req.COOKIES[settings.CSRF_COOKIE_NAME] = 'x' * 100000 CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False) self.assertEqual(len(csrf_cookie.value), CSRF_TOKEN_LENGTH) def test_process_view_token_invalid_chars(self): """ If the token contains non-alphanumeric characters, it is ignored and a new token is created. """ token = ('!@#' + self._csrf_id)[:CSRF_TOKEN_LENGTH] req = self._get_GET_no_csrf_cookie_request() req.COOKIES[settings.CSRF_COOKIE_NAME] = token CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False) self.assertEqual(len(csrf_cookie.value), CSRF_TOKEN_LENGTH) self.assertNotEqual(csrf_cookie.value, token) def test_bare_secret_accepted_and_replaced(self): """ The csrf token is reset from a bare secret. 
""" req = self._get_POST_bare_secret_csrf_cookie_request_with_token() req2 = CsrfViewMiddleware().process_view(req, token_view, (), {}) self.assertIsNone(req2) resp = token_view(req) resp = CsrfViewMiddleware().process_response(req, resp) self.assertIn(settings.CSRF_COOKIE_NAME, resp.cookies, "Cookie was not reset from bare secret") csrf_cookie = resp.cookies[settings.CSRF_COOKIE_NAME] self.assertEqual(len(csrf_cookie.value), CSRF_TOKEN_LENGTH) self._check_token_present(resp, csrf_id=csrf_cookie.value) @override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com', USE_X_FORWARDED_PORT=True) def test_https_good_referer_behind_proxy(self): """ A POST HTTPS request is accepted when USE_X_FORWARDED_PORT=True. """ self._test_https_good_referer_behind_proxy() @override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com') def test_https_good_referer_matches_cookie_domain(self): """ A POST HTTPS request with a good referer should be accepted from a subdomain that's allowed by CSRF_COOKIE_DOMAIN. """ self._test_https_good_referer_matches_cookie_domain() @override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com') def test_https_good_referer_matches_cookie_domain_with_different_port(self): """ A POST HTTPS request with a good referer should be accepted from a subdomain that's allowed by CSRF_COOKIE_DOMAIN and a non-443 port. """ self._test_https_good_referer_matches_cookie_domain_with_different_port() @override_settings(CSRF_COOKIE_DOMAIN='.example.com', DEBUG=True) def test_https_reject_insecure_referer(self): """ A POST HTTPS request from an insecure referer should be rejected. 
""" req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_REFERER'] = 'http://example.com/' req.META['SERVER_PORT'] = '443' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertContains( response, 'Referer checking failed - Referer is insecure while host is secure.', status_code=403, ) @override_settings(CSRF_USE_SESSIONS=True, CSRF_COOKIE_DOMAIN=None) class CsrfViewMiddlewareUseSessionsTests(CsrfViewMiddlewareTestMixin, SimpleTestCase): """ CSRF tests with CSRF_USE_SESSIONS=True. """ def _get_POST_bare_secret_csrf_cookie_request(self): req = self._get_POST_no_csrf_cookie_request() req.session[CSRF_SESSION_KEY] = self._csrf_id_cookie[:32] return req def _get_GET_csrf_cookie_request(self): req = TestingHttpRequest() req.session[CSRF_SESSION_KEY] = self._csrf_id_cookie return req def test_no_session_on_request(self): msg = ( 'CSRF_USE_SESSIONS is enabled, but request.session is not set. ' 'SessionMiddleware must appear before CsrfViewMiddleware in MIDDLEWARE.' ) with self.assertRaisesMessage(ImproperlyConfigured, msg): CsrfViewMiddleware().process_view(HttpRequest(), None, (), {}) def test_process_response_get_token_used(self): """The ensure_csrf_cookie() decorator works without middleware.""" req = self._get_GET_no_csrf_cookie_request() ensure_csrf_cookie_view(req) self.assertTrue(req.session.get(CSRF_SESSION_KEY, False)) def test_ensures_csrf_cookie_with_middleware(self): """ The ensure_csrf_cookie() decorator works with the CsrfViewMiddleware enabled. """ req = self._get_GET_no_csrf_cookie_request() CsrfViewMiddleware().process_view(req, ensure_csrf_cookie_view, (), {}) resp = ensure_csrf_cookie_view(req) CsrfViewMiddleware().process_response(req, resp) self.assertTrue(req.session.get(CSRF_SESSION_KEY, False)) def test_token_node_with_new_csrf_cookie(self): """ CsrfTokenNode works when a CSRF cookie is created by the middleware (when one was not already present). 
""" req = self._get_GET_no_csrf_cookie_request() CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) CsrfViewMiddleware().process_response(req, resp) csrf_cookie = req.session[CSRF_SESSION_KEY] self._check_token_present(resp, csrf_id=csrf_cookie) @override_settings( ALLOWED_HOSTS=['www.example.com'], SESSION_COOKIE_DOMAIN='.example.com', USE_X_FORWARDED_PORT=True, DEBUG=True, ) def test_https_good_referer_behind_proxy(self): """ A POST HTTPS request is accepted when USE_X_FORWARDED_PORT=True. """ self._test_https_good_referer_behind_proxy() @override_settings(ALLOWED_HOSTS=['www.example.com'], SESSION_COOKIE_DOMAIN='.example.com') def test_https_good_referer_matches_cookie_domain(self): """ A POST HTTPS request with a good referer should be accepted from a subdomain that's allowed by SESSION_COOKIE_DOMAIN. """ self._test_https_good_referer_matches_cookie_domain() @override_settings(ALLOWED_HOSTS=['www.example.com'], SESSION_COOKIE_DOMAIN='.example.com') def test_https_good_referer_matches_cookie_domain_with_different_port(self): """ A POST HTTPS request with a good referer should be accepted from a subdomain that's allowed by SESSION_COOKIE_DOMAIN and a non-443 port. """ self._test_https_good_referer_matches_cookie_domain_with_different_port() @override_settings(SESSION_COOKIE_DOMAIN='.example.com', DEBUG=True) def test_https_reject_insecure_referer(self): """ A POST HTTPS request from an insecure referer should be rejected. """ req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_REFERER'] = 'http://example.com/' req.META['SERVER_PORT'] = '443' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertContains( response, 'Referer checking failed - Referer is insecure while host is secure.', status_code=403, )
bsd-3-clause
steko/totalopenstation
totalopenstation/tests/test_polar.py
2
2558
import unittest from totalopenstation.formats import Point from totalopenstation.formats.polar import BasePoint, PolarPoint class TestPolar(unittest.TestCase): def setUp(self): self.bp0 = BasePoint(x='0', y='0', z='0', ih='1.0', b_zero_st='0.0') self.bp1 = BasePoint(x='0', y='0', z='0', ih='1.324', b_zero_st='0.0') self.p0 = PolarPoint(angle_unit='deg', z_angle_type='z', dist_type='s', dist=9, angle=180, z_angle=90, th=0, base_point=self.bp0, pid=1, text='Test Point', coordorder='ENZ') self.p1 = PolarPoint(angle_unit='gon', z_angle_type='z', dist_type='s', dist=24.567, angle=34.120, z_angle=100, th=1.500, base_point=self.bp0, pid=2, text='Real Point', coordorder='NEZ') self.p2 = PolarPoint(angle_unit='dms', z_angle_type='z', dist_type='s', dist=13.825, angle=35.45100, z_angle=91.17510, th=1.300, base_point=self.bp1, pid=3, text='Real Point', coordorder='ENZ') def test_polar(self): p0_test = Point(0.0, -9.0, 1.0) self.assertAlmostEqual(self.p0.to_point().x, p0_test.x) self.assertAlmostEqual(self.p0.to_point().y, p0_test.y) self.assertAlmostEqual(self.p0.to_point().z, p0_test.z) p1_test = Point(21.1222392859, 12.5454572076, -0.5) self.assertAlmostEqual(self.p1.to_point().x, p1_test.x) self.assertAlmostEqual(self.p1.to_point().y, p1_test.y) self.assertAlmostEqual(self.p1.to_point().z, p1_test.z) p2_test = Point(8.0757244, 11.21674196, -0.2890493) self.assertAlmostEqual(self.p2.to_point().x, p2_test.x) self.assertAlmostEqual(self.p2.to_point().y, p2_test.y) self.assertAlmostEqual(self.p2.to_point().z, p2_test.z)
gpl-3.0
111t8e/h2o-2
py/testdir_single_jvm/test_GLM2_gamma_rand2.py
9
1635
import unittest, random, sys, time sys.path.extend(['.','..','../..','py']) import h2o, h2o_cmd, h2o_glm, h2o_import as h2i def define_params(): paramDict = { 'standardize': [None, 0,1], 'beta_epsilon': [None, 0.0001], 'ignored_cols': [0,1,15,33,34], 'family': ['gamma'], 'n_folds': [2,3,4], 'lambda': [1e-8, 1e-4], 'alpha': [0,0.5,0.75], 'max_iter': [24], } return paramDict class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): global SEED SEED = h2o.setup_random_seed() h2o.init() @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_GLM2_gamma_rand2(self): csvPathname = 'standard/covtype.data' parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put') paramDict = define_params() for trial in range(10): # params is mutable. This is default. params = {'response': 54, 'n_folds': 3, 'family': "gamma", 'alpha': 0.5, 'lambda': 1e-4, 'max_iter': 24} colX = h2o_glm.pickRandGlmParams(paramDict, params) kwargs = params.copy() start = time.time() glm = h2o_cmd.runGLM(timeoutSecs=300, parseResult=parseResult, **kwargs) print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds' h2o_glm.simpleCheckGLM(self, glm, None, **kwargs) print "Trial #", trial, "completed\n" if __name__ == '__main__': h2o.unit_main()
apache-2.0
gauravbose/digital-menu
digimenu2/django/forms/formsets.py
100
17573
from __future__ import unicode_literals from django.core.exceptions import ValidationError from django.forms import Form from django.forms.fields import BooleanField, IntegerField from django.forms.utils import ErrorList from django.forms.widgets import HiddenInput from django.utils import six from django.utils.encoding import python_2_unicode_compatible from django.utils.functional import cached_property from django.utils.html import html_safe from django.utils.safestring import mark_safe from django.utils.six.moves import range from django.utils.translation import ugettext as _, ungettext __all__ = ('BaseFormSet', 'formset_factory', 'all_valid') # special field names TOTAL_FORM_COUNT = 'TOTAL_FORMS' INITIAL_FORM_COUNT = 'INITIAL_FORMS' MIN_NUM_FORM_COUNT = 'MIN_NUM_FORMS' MAX_NUM_FORM_COUNT = 'MAX_NUM_FORMS' ORDERING_FIELD_NAME = 'ORDER' DELETION_FIELD_NAME = 'DELETE' # default minimum number of forms in a formset DEFAULT_MIN_NUM = 0 # default maximum number of forms in a formset, to prevent memory exhaustion DEFAULT_MAX_NUM = 1000 class ManagementForm(Form): """ ``ManagementForm`` is used to keep track of how many form instances are displayed on the page. If adding new forms via javascript, you should increment the count field of this form as well. """ def __init__(self, *args, **kwargs): self.base_fields[TOTAL_FORM_COUNT] = IntegerField(widget=HiddenInput) self.base_fields[INITIAL_FORM_COUNT] = IntegerField(widget=HiddenInput) # MIN_NUM_FORM_COUNT and MAX_NUM_FORM_COUNT are output with the rest of # the management form, but only for the convenience of client-side # code. The POST value of them returned from the client is not checked. 
self.base_fields[MIN_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput) self.base_fields[MAX_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput) super(ManagementForm, self).__init__(*args, **kwargs) @html_safe @python_2_unicode_compatible class BaseFormSet(object): """ A collection of instances of the same Form class. """ def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList): self.is_bound = data is not None or files is not None self.prefix = prefix or self.get_default_prefix() self.auto_id = auto_id self.data = data or {} self.files = files or {} self.initial = initial self.error_class = error_class self._errors = None self._non_form_errors = None def __str__(self): return self.as_table() def __iter__(self): """Yields the forms in the order they should be rendered""" return iter(self.forms) def __getitem__(self, index): """Returns the form at the given index, based on the rendering order""" return self.forms[index] def __len__(self): return len(self.forms) def __bool__(self): """All formsets have a management form which is not included in the length""" return True def __nonzero__(self): # Python 2 compatibility return type(self).__bool__(self) @property def management_form(self): """Returns the ManagementForm instance for this FormSet.""" if self.is_bound: form = ManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix) if not form.is_valid(): raise ValidationError( _('ManagementForm data is missing or has been tampered with'), code='missing_management_form', ) else: form = ManagementForm(auto_id=self.auto_id, prefix=self.prefix, initial={ TOTAL_FORM_COUNT: self.total_form_count(), INITIAL_FORM_COUNT: self.initial_form_count(), MIN_NUM_FORM_COUNT: self.min_num, MAX_NUM_FORM_COUNT: self.max_num }) return form def total_form_count(self): """Returns the total number of forms in this FormSet.""" if self.is_bound: # return absolute_max if it is lower than the actual total 
form # count in the data; this is DoS protection to prevent clients # from forcing the server to instantiate arbitrary numbers of # forms return min(self.management_form.cleaned_data[TOTAL_FORM_COUNT], self.absolute_max) else: initial_forms = self.initial_form_count() total_forms = max(initial_forms, self.min_num) + self.extra # Allow all existing related objects/inlines to be displayed, # but don't allow extra beyond max_num. if initial_forms > self.max_num >= 0: total_forms = initial_forms elif total_forms > self.max_num >= 0: total_forms = self.max_num return total_forms def initial_form_count(self): """Returns the number of forms that are required in this FormSet.""" if self.is_bound: return self.management_form.cleaned_data[INITIAL_FORM_COUNT] else: # Use the length of the initial data if it's there, 0 otherwise. initial_forms = len(self.initial) if self.initial else 0 return initial_forms @cached_property def forms(self): """ Instantiate forms at first property access. """ # DoS protection is included in total_form_count() forms = [self._construct_form(i) for i in range(self.total_form_count())] return forms def _construct_form(self, i, **kwargs): """ Instantiates and returns the i-th form instance in a formset. """ defaults = { 'auto_id': self.auto_id, 'prefix': self.add_prefix(i), 'error_class': self.error_class, } if self.is_bound: defaults['data'] = self.data defaults['files'] = self.files if self.initial and 'initial' not in kwargs: try: defaults['initial'] = self.initial[i] except IndexError: pass # Allow extra forms to be empty, unless they're part of # the minimum forms. 
if i >= self.initial_form_count() and i >= self.min_num: defaults['empty_permitted'] = True defaults.update(kwargs) form = self.form(**defaults) self.add_fields(form, i) return form @property def initial_forms(self): """Return a list of all the initial forms in this formset.""" return self.forms[:self.initial_form_count()] @property def extra_forms(self): """Return a list of all the extra forms in this formset.""" return self.forms[self.initial_form_count():] @property def empty_form(self): form = self.form( auto_id=self.auto_id, prefix=self.add_prefix('__prefix__'), empty_permitted=True, ) self.add_fields(form, None) return form @property def cleaned_data(self): """ Returns a list of form.cleaned_data dicts for every form in self.forms. """ if not self.is_valid(): raise AttributeError("'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__) return [form.cleaned_data for form in self.forms] @property def deleted_forms(self): """ Returns a list of forms that have been marked for deletion. """ if not self.is_valid() or not self.can_delete: return [] # construct _deleted_form_indexes which is just a list of form indexes # that have had their deletion widget set to True if not hasattr(self, '_deleted_form_indexes'): self._deleted_form_indexes = [] for i in range(0, self.total_form_count()): form = self.forms[i] # if this is an extra form and hasn't changed, don't consider it if i >= self.initial_form_count() and not form.has_changed(): continue if self._should_delete_form(form): self._deleted_form_indexes.append(i) return [self.forms[i] for i in self._deleted_form_indexes] @property def ordered_forms(self): """ Returns a list of form in the order specified by the incoming data. Raises an AttributeError if ordering is not allowed. 
""" if not self.is_valid() or not self.can_order: raise AttributeError("'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__) # Construct _ordering, which is a list of (form_index, order_field_value) # tuples. After constructing this list, we'll sort it by order_field_value # so we have a way to get to the form indexes in the order specified # by the form data. if not hasattr(self, '_ordering'): self._ordering = [] for i in range(0, self.total_form_count()): form = self.forms[i] # if this is an extra form and hasn't changed, don't consider it if i >= self.initial_form_count() and not form.has_changed(): continue # don't add data marked for deletion to self.ordered_data if self.can_delete and self._should_delete_form(form): continue self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME])) # After we're done populating self._ordering, sort it. # A sort function to order things numerically ascending, but # None should be sorted below anything else. Allowing None as # a comparison value makes it so we can leave ordering fields # blank. def compare_ordering_key(k): if k[1] is None: return (1, 0) # +infinity, larger than any number return (0, k[1]) self._ordering.sort(key=compare_ordering_key) # Return a list of form.cleaned_data dicts in the order specified by # the form data. return [self.forms[i[0]] for i in self._ordering] @classmethod def get_default_prefix(cls): return 'form' def non_form_errors(self): """ Returns an ErrorList of errors that aren't associated with a particular form -- i.e., from formset.clean(). Returns an empty ErrorList if there are none. """ if self._non_form_errors is None: self.full_clean() return self._non_form_errors @property def errors(self): """ Returns a list of form.errors for every form in self.forms. """ if self._errors is None: self.full_clean() return self._errors def total_error_count(self): """ Returns the number of errors across all forms in the formset. 
""" return len(self.non_form_errors()) +\ sum(len(form_errors) for form_errors in self.errors) def _should_delete_form(self, form): """ Returns whether or not the form was marked for deletion. """ return form.cleaned_data.get(DELETION_FIELD_NAME, False) def is_valid(self): """ Returns True if every form in self.forms is valid. """ if not self.is_bound: return False # We loop over every form.errors here rather than short circuiting on the # first failure to make sure validation gets triggered for every form. forms_valid = True # This triggers a full clean. self.errors for i in range(0, self.total_form_count()): form = self.forms[i] if self.can_delete: if self._should_delete_form(form): # This form is going to be deleted so any of its errors # should not cause the entire formset to be invalid. continue forms_valid &= form.is_valid() return forms_valid and not self.non_form_errors() def full_clean(self): """ Cleans all of self.data and populates self._errors and self._non_form_errors. """ self._errors = [] self._non_form_errors = self.error_class() if not self.is_bound: # Stop further processing. return for i in range(0, self.total_form_count()): form = self.forms[i] self._errors.append(form.errors) try: if (self.validate_max and self.total_form_count() - len(self.deleted_forms) > self.max_num) or \ self.management_form.cleaned_data[TOTAL_FORM_COUNT] > self.absolute_max: raise ValidationError(ungettext( "Please submit %d or fewer forms.", "Please submit %d or fewer forms.", self.max_num) % self.max_num, code='too_many_forms', ) if (self.validate_min and self.total_form_count() - len(self.deleted_forms) < self.min_num): raise ValidationError(ungettext( "Please submit %d or more forms.", "Please submit %d or more forms.", self.min_num) % self.min_num, code='too_few_forms') # Give self.clean() a chance to do cross-form validation. 
self.clean() except ValidationError as e: self._non_form_errors = self.error_class(e.error_list) def clean(self): """ Hook for doing any extra formset-wide cleaning after Form.clean() has been called on every form. Any ValidationError raised by this method will not be associated with a particular form; it will be accessible via formset.non_form_errors() """ pass def has_changed(self): """ Returns true if data in any form differs from initial. """ return any(form.has_changed() for form in self) def add_fields(self, form, index): """A hook for adding extra fields on to each form instance.""" if self.can_order: # Only pre-fill the ordering field for initial forms. if index is not None and index < self.initial_form_count(): form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), initial=index + 1, required=False) else: form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), required=False) if self.can_delete: form.fields[DELETION_FIELD_NAME] = BooleanField(label=_('Delete'), required=False) def add_prefix(self, index): return '%s-%s' % (self.prefix, index) def is_multipart(self): """ Returns True if the formset needs to be multipart, i.e. it has FileInput. Otherwise, False. """ if self.forms: return self.forms[0].is_multipart() else: return self.empty_form.is_multipart() @property def media(self): # All the forms on a FormSet are the same, so you only need to # interrogate the first form for media. if self.forms: return self.forms[0].media else: return self.empty_form.media def as_table(self): "Returns this formset rendered as HTML <tr>s -- excluding the <table></table>." # XXX: there is no semantic division between forms here, there # probably should be. It might make sense to render each form as a # table row with each field as a td. forms = ' '.join(form.as_table() for form in self) return mark_safe('\n'.join([six.text_type(self.management_form), forms])) def as_p(self): "Returns this formset rendered as HTML <p>s." 
forms = ' '.join(form.as_p() for form in self) return mark_safe('\n'.join([six.text_type(self.management_form), forms])) def as_ul(self): "Returns this formset rendered as HTML <li>s." forms = ' '.join(form.as_ul() for form in self) return mark_safe('\n'.join([six.text_type(self.management_form), forms])) def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False, can_delete=False, max_num=None, validate_max=False, min_num=None, validate_min=False): """Return a FormSet for the given form class.""" if min_num is None: min_num = DEFAULT_MIN_NUM if max_num is None: max_num = DEFAULT_MAX_NUM # hard limit on forms instantiated, to prevent memory-exhaustion attacks # limit is simply max_num + DEFAULT_MAX_NUM (which is 2*DEFAULT_MAX_NUM # if max_num is None in the first place) absolute_max = max_num + DEFAULT_MAX_NUM attrs = {'form': form, 'extra': extra, 'can_order': can_order, 'can_delete': can_delete, 'min_num': min_num, 'max_num': max_num, 'absolute_max': absolute_max, 'validate_min': validate_min, 'validate_max': validate_max} return type(form.__name__ + str('FormSet'), (formset,), attrs) def all_valid(formsets): """Returns true if every formset in formsets is valid.""" valid = True for formset in formsets: if not formset.is_valid(): valid = False return valid
bsd-3-clause
ogenstad/ansible
lib/ansible/modules/windows/win_webpicmd.py
52
1349
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2015, Peter Mounce <public@neverrunwithscissors.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: win_webpicmd version_added: "2.0" short_description: Installs packages using Web Platform Installer command-line description: - Installs packages using Web Platform Installer command-line (U(http://www.iis.net/learn/install/web-platform-installer/web-platform-installer-v4-command-line-webpicmdexe-rtw-release)). - Must be installed and present in PATH (see M(win_chocolatey) module; 'webpicmd' is the package name, and you must install 'lessmsi' first too)? - Install IIS first (see M(win_feature) module). notes: - Accepts EULAs and suppresses reboot - you will need to check manage reboots yourself (see M(win_reboot) module) options: name: description: - Name of the package to be installed. required: yes author: - Peter Mounce (@petemounce) ''' EXAMPLES = r''' # Install URLRewrite2. win_webpicmd: name: URLRewrite2 '''
gpl-3.0
pkuyym/Paddle
python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
5
2899
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest from test_softmax_op import stable_softmax class TestSoftmaxWithCrossEntropyOp(OpTest): """ Test softmax with cross entropy operator with discreate one-hot labels. """ def setUp(self): self.op_type = "softmax_with_cross_entropy" batch_size = 41 class_num = 37 logits = np.random.uniform(0.1, 1.0, [batch_size, class_num]).astype("float64") softmax = np.apply_along_axis(stable_softmax, 1, logits) labels = np.random.randint(0, class_num, [batch_size, 1], dtype="int64") cross_entropy = np.asmatrix( [[-np.log(softmax[i][labels[i][0]])] for i in range(softmax.shape[0])], dtype="float64") self.inputs = {"Logits": logits, "Label": labels} self.outputs = { "Softmax": softmax.astype("float64"), "Loss": cross_entropy.astype("float64") } def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(["Logits"], "Loss") class TestSoftmaxWithCrossEntropyOp2(OpTest): """ Test softmax with cross entropy operator with soft labels. 
""" def setUp(self): self.op_type = "softmax_with_cross_entropy" batch_size = 41 class_num = 37 logits = np.random.uniform(0.1, 1.0, [batch_size, class_num]).astype("float64") softmax = np.apply_along_axis(stable_softmax, 1, logits) labels = np.random.uniform(0.1, 1.0, [batch_size, class_num]).astype("float64") labels /= np.sum(labels, axis=1, keepdims=True) cross_entropy = (-labels * np.log(softmax)).sum( axis=1, keepdims=True).astype("float64") self.inputs = {"Logits": logits, "Label": labels} self.outputs = { "Softmax": softmax.astype("float64"), "Loss": cross_entropy.astype("float64") } self.attrs = {"soft_label": True} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(["Logits"], "Loss") if __name__ == "__main__": unittest.main()
apache-2.0
eblossom/gnuradio
gr-audio/examples/python/dial_tone_wav.py
58
2398
#!/usr/bin/env python # # Copyright 2004,2005,2007,2008,2012,2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # # GNU Radio example program to record a dial tone to a WAV file from gnuradio import gr from gnuradio import blocks from gnuradio.eng_option import eng_option from optparse import OptionParser try: from gnuradio import analog except ImportError: sys.stderr.write("Error: Program requires gr-analog.\n") sys.exit(1) class my_top_block(gr.top_block): def __init__(self): gr.top_block.__init__(self) usage = "%prog: [options] filename" parser = OptionParser(option_class=eng_option, usage=usage) parser.add_option("-r", "--sample-rate", type="eng_float", default=48000, help="set sample rate to RATE (48000)") parser.add_option("-N", "--samples", type="eng_float", default=None, help="number of samples to record") (options, args) = parser.parse_args () if len(args) != 1 or options.samples is None: parser.print_help() raise SystemExit, 1 sample_rate = int(options.sample_rate) ampl = 0.1 src0 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 350, ampl) src1 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 440, ampl) head0 = blocks.head(gr.sizeof_float, int(options.samples)) head1 = blocks.head(gr.sizeof_float, int(options.samples)) dst = 
blocks.wavfile_sink(args[0], 2, int(options.sample_rate), 16) self.connect(src0, head0, (dst, 0)) self.connect(src1, head1, (dst, 1)) if __name__ == '__main__': try: my_top_block().run() except KeyboardInterrupt: pass
gpl-3.0
tomchristie/django
django/core/management/commands/squashmigrations.py
17
9351
from django.conf import settings from django.core.management.base import BaseCommand, CommandError from django.db import DEFAULT_DB_ALIAS, connections, migrations from django.db.migrations.loader import AmbiguityError, MigrationLoader from django.db.migrations.migration import SwappableTuple from django.db.migrations.optimizer import MigrationOptimizer from django.db.migrations.writer import MigrationWriter from django.utils.version import get_docs_version class Command(BaseCommand): help = "Squashes an existing set of migrations (from first until specified) into a single new one." def add_arguments(self, parser): parser.add_argument( 'app_label', help='App label of the application to squash migrations for.', ) parser.add_argument( 'start_migration_name', default=None, nargs='?', help='Migrations will be squashed starting from and including this migration.', ) parser.add_argument( 'migration_name', help='Migrations will be squashed until and including this migration.', ) parser.add_argument( '--no-optimize', action='store_true', dest='no_optimize', help='Do not try to optimize the squashed operations.', ) parser.add_argument( '--noinput', '--no-input', action='store_false', dest='interactive', help='Tells Django to NOT prompt the user for input of any kind.', ) parser.add_argument( '--squashed-name', dest='squashed_name', help='Sets the name of the new squashed migration.', ) def handle(self, **options): self.verbosity = options['verbosity'] self.interactive = options['interactive'] app_label = options['app_label'] start_migration_name = options['start_migration_name'] migration_name = options['migration_name'] no_optimize = options['no_optimize'] squashed_name = options['squashed_name'] # Load the current graph state, check the app and migration they asked for exists loader = MigrationLoader(connections[DEFAULT_DB_ALIAS]) if app_label not in loader.migrated_apps: raise CommandError( "App '%s' does not have migrations (so squashmigrations on " "it makes no sense)" 
% app_label ) migration = self.find_migration(loader, app_label, migration_name) # Work out the list of predecessor migrations migrations_to_squash = [ loader.get_migration(al, mn) for al, mn in loader.graph.forwards_plan((migration.app_label, migration.name)) if al == migration.app_label ] if start_migration_name: start_migration = self.find_migration(loader, app_label, start_migration_name) start = loader.get_migration(start_migration.app_label, start_migration.name) try: start_index = migrations_to_squash.index(start) migrations_to_squash = migrations_to_squash[start_index:] except ValueError: raise CommandError( "The migration '%s' cannot be found. Maybe it comes after " "the migration '%s'?\n" "Have a look at:\n" " python manage.py showmigrations %s\n" "to debug this issue." % (start_migration, migration, app_label) ) # Tell them what we're doing and optionally ask if we should proceed if self.verbosity > 0 or self.interactive: self.stdout.write(self.style.MIGRATE_HEADING("Will squash the following migrations:")) for migration in migrations_to_squash: self.stdout.write(" - %s" % migration.name) if self.interactive: answer = None while not answer or answer not in "yn": answer = input("Do you wish to proceed? [yN] ") if not answer: answer = "n" break else: answer = answer[0].lower() if answer != "y": return # Load the operations from all those migrations and concat together, # along with collecting external dependencies and detecting # double-squashing operations = [] dependencies = set() # We need to take all dependencies from the first migration in the list # as it may be 0002 depending on 0001 first_migration = True for smigration in migrations_to_squash: if smigration.replaces: raise CommandError( "You cannot squash squashed migrations! 
Please transition " "it to a normal migration first: " "https://docs.djangoproject.com/en/%s/topics/migrations/#squashing-migrations" % get_docs_version() ) operations.extend(smigration.operations) for dependency in smigration.dependencies: if isinstance(dependency, SwappableTuple): if settings.AUTH_USER_MODEL == dependency.setting: dependencies.add(("__setting__", "AUTH_USER_MODEL")) else: dependencies.add(dependency) elif dependency[0] != smigration.app_label or first_migration: dependencies.add(dependency) first_migration = False if no_optimize: if self.verbosity > 0: self.stdout.write(self.style.MIGRATE_HEADING("(Skipping optimization.)")) new_operations = operations else: if self.verbosity > 0: self.stdout.write(self.style.MIGRATE_HEADING("Optimizing...")) optimizer = MigrationOptimizer() new_operations = optimizer.optimize(operations, migration.app_label) if self.verbosity > 0: if len(new_operations) == len(operations): self.stdout.write(" No optimizations possible.") else: self.stdout.write( " Optimized from %s operations to %s operations." % (len(operations), len(new_operations)) ) # Work out the value of replaces (any squashed ones we're re-squashing) # need to feed their replaces into ours replaces = [] for migration in migrations_to_squash: if migration.replaces: replaces.extend(migration.replaces) else: replaces.append((migration.app_label, migration.name)) # Make a new migration with those operations subclass = type("Migration", (migrations.Migration, ), { "dependencies": dependencies, "operations": new_operations, "replaces": replaces, }) if start_migration_name: if squashed_name: # Use the name from --squashed-name. prefix, _ = start_migration.name.split('_', 1) name = '%s_%s' % (prefix, squashed_name) else: # Generate a name. 
name = '%s_squashed_%s' % (start_migration.name, migration.name) new_migration = subclass(name, app_label) else: name = '0001_%s' % (squashed_name or 'squashed_%s' % migration.name) new_migration = subclass(name, app_label) new_migration.initial = True # Write out the new migration file writer = MigrationWriter(new_migration) with open(writer.path, "w", encoding='utf-8') as fh: fh.write(writer.as_string()) if self.verbosity > 0: self.stdout.write(self.style.MIGRATE_HEADING("Created new squashed migration %s" % writer.path)) self.stdout.write(" You should commit this migration but leave the old ones in place;") self.stdout.write(" the new migration will be used for new installs. Once you are sure") self.stdout.write(" all instances of the codebase have applied the migrations you squashed,") self.stdout.write(" you can delete them.") if writer.needs_manual_porting: self.stdout.write(self.style.MIGRATE_HEADING("Manual porting required")) self.stdout.write(" Your migrations contained functions that must be manually copied over,") self.stdout.write(" as we could not safely copy their implementation.") self.stdout.write(" See the comment at the top of the squashed migration for details.") def find_migration(self, loader, app_label, name): try: return loader.get_migration_by_prefix(app_label, name) except AmbiguityError: raise CommandError( "More than one migration matches '%s' in app '%s'. Please be " "more specific." % (name, app_label) ) except KeyError: raise CommandError( "Cannot find a migration matching '%s' from app '%s'." % (name, app_label) )
bsd-3-clause