repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
analyseuc3m/ANALYSE-v1
common/djangoapps/course_modes/admin.py
30
9561
""" Django admin page for course modes """
from django.conf import settings
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin

from pytz import timezone, UTC

from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys import InvalidKeyError

from util.date_utils import get_time_display
from xmodule.modulestore.django import modulestore

from course_modes.models import CourseMode, CourseModeExpirationConfig

# Technically, we shouldn't be doing this, since verify_student is defined
# in LMS, and course_modes is defined in common.
#
# Once we move the responsibility for administering course modes into
# the Course Admin tool, we can remove this dependency and expose
# verification deadlines as a separate Django model admin.
#
# The admin page will work in both LMS and Studio,
# but the test suite for Studio will fail because
# the verification deadline table won't exist.
from lms.djangoapps.verify_student import models as verification_models


class CourseModeForm(forms.ModelForm):
    """
    Admin form for creating/editing a course mode.

    In addition to the CourseMode model fields, this form exposes the
    verification deadline (stored on a verify_student model) as a custom
    field, and normalizes all datetimes between the Django-admin default
    timezone and UTC.
    """

    class Meta(object):
        model = CourseMode
        fields = '__all__'

    COURSE_MODE_SLUG_CHOICES = (
        [(CourseMode.DEFAULT_MODE_SLUG, CourseMode.DEFAULT_MODE_SLUG)] +
        [(mode_slug, mode_slug) for mode_slug in CourseMode.VERIFIED_MODES] +
        [(CourseMode.NO_ID_PROFESSIONAL_MODE, CourseMode.NO_ID_PROFESSIONAL_MODE)] +
        [(mode_slug, mode_slug) for mode_slug in CourseMode.CREDIT_MODES] +
        # need to keep legacy modes around for awhile
        [(CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG, CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)]
    )

    mode_slug = forms.ChoiceField(choices=COURSE_MODE_SLUG_CHOICES, label=_("Mode"))

    # The verification deadline is stored outside the course mode in the verify_student app.
    # (we used to use the course mode expiration_datetime as both an upgrade and verification deadline).
    # In order to make this transition easier, we include the verification deadline as a custom field
    # in the course mode admin form.  Longer term, we will deprecate the course mode Django admin
    # form in favor of an external Course Administration Tool.
    verification_deadline = forms.SplitDateTimeField(
        label=_("Verification Deadline"),
        required=False,
        help_text=_(
            "OPTIONAL: After this date/time, users will no longer be able to submit photos for verification.  "
            # Typo fix: original read "This appies ONLY ..."
            "This applies ONLY to modes that require verification."
        ),
        widget=admin.widgets.AdminSplitDateTime,
    )

    def __init__(self, *args, **kwargs):
        """Localize stored UTC datetimes into the admin's default timezone for display."""
        super(CourseModeForm, self).__init__(*args, **kwargs)

        default_tz = timezone(settings.TIME_ZONE)

        if self.instance._expiration_datetime:  # pylint: disable=protected-access
            # django admin is using default timezone. To avoid time conversion from db to form
            # convert the UTC object to naive and then localize with default timezone.
            _expiration_datetime = self.instance._expiration_datetime.replace(  # pylint: disable=protected-access
                tzinfo=None
            )
            self.initial["_expiration_datetime"] = default_tz.localize(_expiration_datetime)

        # Load the verification deadline
        # Since this is stored on a model in verify student, we need to load it from there.
        # We need to munge the timezone a bit to get Django admin to display it without converting
        # it to the user's timezone.  We'll make sure we write it back to the database with the timezone
        # set to UTC later.
        if self.instance.course_id and self.instance.mode_slug in CourseMode.VERIFIED_MODES:
            deadline = verification_models.VerificationDeadline.deadline_for_course(self.instance.course_id)
            self.initial["verification_deadline"] = (
                default_tz.localize(deadline.replace(tzinfo=None))
                if deadline is not None else None
            )

    def clean_course_id(self):
        """
        Validate the course ID and return it as a CourseKey.

        Raises:
            forms.ValidationError: if the ID cannot be parsed (including via the
                deprecated slash-separated format) or no such course exists.
        """
        course_id = self.cleaned_data['course_id']
        try:
            course_key = CourseKey.from_string(course_id)
        except InvalidKeyError:
            try:
                course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
            except InvalidKeyError:
                raise forms.ValidationError("Cannot make a valid CourseKey from id {}!".format(course_id))

        if not modulestore().has_course(course_key):
            raise forms.ValidationError("Cannot find course with id {} in the modulestore".format(course_id))

        return course_key

    def clean__expiration_datetime(self):
        """
        Ensure that the expiration datetime we save uses the UTC timezone.
        """
        # django admin saving the date with default timezone to avoid time conversion from form to db
        # changes its tzinfo to UTC
        if self.cleaned_data.get("_expiration_datetime"):
            return self.cleaned_data.get("_expiration_datetime").replace(tzinfo=UTC)

    def clean_verification_deadline(self):
        """
        Ensure that the verification deadline we save uses the UTC timezone.
        """
        if self.cleaned_data.get("verification_deadline"):
            return self.cleaned_data.get("verification_deadline").replace(tzinfo=UTC)

    def clean(self):
        """
        Clean the form fields.
        This is the place to perform checks that involve multiple form fields.
        """
        cleaned_data = super(CourseModeForm, self).clean()
        mode_slug = cleaned_data.get("mode_slug")
        upgrade_deadline = cleaned_data.get("_expiration_datetime")
        verification_deadline = cleaned_data.get("verification_deadline")

        # Allow upgrade deadlines ONLY for the "verified" mode
        # This avoids a nasty error condition in which the upgrade deadline is set
        # for a professional education course before the enrollment end date.
        # When this happens, the course mode expires and students are able to enroll
        # in the course for free.  To avoid this, we explicitly prevent admins from
        # setting an upgrade deadline for any mode except "verified" (which has an upgrade path).
        if upgrade_deadline is not None and mode_slug != CourseMode.VERIFIED:
            raise forms.ValidationError(
                'Only the "verified" mode can have an upgrade deadline.  '
                'For other modes, please set the enrollment end date in Studio.'
            )

        # Verification deadlines are allowed only for verified modes
        if verification_deadline is not None and mode_slug not in CourseMode.VERIFIED_MODES:
            raise forms.ValidationError("Verification deadline can be set only for verified modes.")

        # Verification deadline must be after the upgrade deadline,
        # if an upgrade deadline is set.
        # There are cases in which we might want to set a verification deadline,
        # but not an upgrade deadline (for example, a professional education course that requires verification).
        if verification_deadline is not None:
            if upgrade_deadline is not None and verification_deadline < upgrade_deadline:
                raise forms.ValidationError("Verification deadline must be after the upgrade deadline.")

        return cleaned_data

    def save(self, commit=True):
        """
        Save the form data, persisting the verification deadline separately.
        """
        # Trigger validation so we can access cleaned data
        if self.is_valid():
            course_key = self.cleaned_data.get("course_id")
            verification_deadline = self.cleaned_data.get("verification_deadline")
            mode_slug = self.cleaned_data.get("mode_slug")

            # Since the verification deadline is stored in a separate model,
            # we need to handle saving this ourselves.
            # Note that verification deadline can be `None` here if
            # the deadline is being disabled.
            if course_key is not None and mode_slug in CourseMode.VERIFIED_MODES:
                verification_models.VerificationDeadline.set_deadline(course_key, verification_deadline)

        return super(CourseModeForm, self).save(commit=commit)


class CourseModeAdmin(admin.ModelAdmin):
    """Admin for course modes"""
    form = CourseModeForm

    fields = (
        'course_id',
        'mode_slug',
        'mode_display_name',
        'min_price',
        'currency',
        '_expiration_datetime',
        'verification_deadline',
        'sku'
    )

    search_fields = ('course_id',)

    list_display = (
        'id',
        'course_id',
        'mode_slug',
        'min_price',
        'expiration_datetime_custom',
        'sku'
    )

    def expiration_datetime_custom(self, obj):
        """adding custom column to show the expiry_datetime"""
        if obj.expiration_datetime:
            return get_time_display(obj.expiration_datetime, '%B %d, %Y, %H:%M  %p')

    # Display a more user-friendly name for the custom expiration datetime field
    # in the Django admin list view.
    expiration_datetime_custom.short_description = "Upgrade Deadline"


class CourseModeExpirationConfigAdmin(admin.ModelAdmin):
    """Admin interface for the course mode auto expiration configuration. """

    class Meta(object):
        model = CourseModeExpirationConfig


admin.site.register(CourseMode, CourseModeAdmin)
admin.site.register(CourseModeExpirationConfig, CourseModeExpirationConfigAdmin)
agpl-3.0
handroissuazo/tensorflow
tensorflow/python/debug/cli/command_parser.py
17
8042
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Command parsing module for TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import ast
import re
import sys

_BRACKETS_PATTERN = re.compile(r"\[[^\]]*\]")
_QUOTES_PATTERN = re.compile(r"\"[^\"]*\"")
_WHITESPACE_PATTERN = re.compile(r"\s+")


def parse_command(command):
  """Parse command string into a list of arguments.

  - Disregards whitespace inside double quotes and brackets.
  - Strips paired leading and trailing double quotes in arguments.
  - Splits the command at whitespace.

  Nested double quotes and brackets are not handled.

  Args:
    command: (str) Input command.

  Returns:
    (list of str) List of arguments.
  """
  command = command.strip()
  if not command:
    return []

  brackets_intervals = [f.span() for f in _BRACKETS_PATTERN.finditer(command)]
  quotes_intervals = [f.span() for f in _QUOTES_PATTERN.finditer(command)]
  whitespaces_intervals = [
      f.span() for f in _WHITESPACE_PATTERN.finditer(command)
  ]

  if not whitespaces_intervals:
    return [command]

  arguments = []
  idx0 = 0
  for start, end in whitespaces_intervals + [(len(command), None)]:
    # Skip whitespace stretches enclosed in brackets or double quotes.
    if not any(interval[0] < start < interval[1]
               for interval in brackets_intervals + quotes_intervals):
      argument = command[idx0:start]

      # Strip leading and trailing double quote if they are paired.
      if argument.startswith("\"") and argument.endswith("\""):
        argument = argument[1:-1]
      arguments.append(argument)
      idx0 = end

  return arguments


def extract_output_file_path(args):
  """Extract output file path from command arguments.

  Args:
    args: (list of str) command arguments.

  Returns:
    (list of str) Command arguments with the output file path part stripped.
    (str or None) Output file path (if any).

  Raises:
    SyntaxError: If there is no file path after the last ">" character.
  """
  if args and args[-1].endswith(">"):
    raise SyntaxError("Redirect file path is empty")
  elif args and args[-1].startswith(">"):
    output_file_path = args[-1][1:]
    args = args[:-1]
  elif len(args) > 1 and args[-2] == ">":
    output_file_path = args[-1]
    args = args[:-2]
  elif args and args[-1].count(">") == 1:
    gt_index = args[-1].index(">")
    output_file_path = args[-1][gt_index + 1:]
    args[-1] = args[-1][:gt_index]
  elif len(args) > 1 and args[-2].endswith(">"):
    output_file_path = args[-1]
    args = args[:-1]
    args[-1] = args[-1][:-1]
  else:
    output_file_path = None

  return args, output_file_path


def parse_tensor_name_with_slicing(in_str):
  """Parse tensor name, potentially suffixed by slicing string.

  Args:
    in_str: (str) Input name of the tensor, potentially followed by a slicing
      string. E.g.: Without slicing string: "hidden/weights/Variable:0", with
      slicing string: "hidden/weights/Variable:0[1, :]"

  Returns:
    (str) name of the tensor
    (str) slicing string, if any. If no slicing string is present, return "".
  """
  if in_str.count("[") == 1 and in_str.endswith("]"):
    tensor_name = in_str[:in_str.index("[")]
    tensor_slicing = in_str[in_str.index("["):]
  else:
    tensor_name = in_str
    tensor_slicing = ""

  return tensor_name, tensor_slicing


def validate_slicing_string(slicing_string):
  """Validate a slicing string.

  Check if the input string contains only brackets, digits, commas and
  colons that are valid characters in numpy-style array slicing.

  Args:
    slicing_string: (str) Input slicing string to be validated.

  Returns:
    (bool) True if and only if the slicing string is valid.
  """
  return bool(re.search(r"^\[(\d|,|\s|:)+\]$", slicing_string))


def parse_indices(indices_string):
  """Parse a string representing indices.

  For example, if the input is "[1, 2, 3]", the return value will be a list of
  indices: [1, 2, 3]

  Args:
    indices_string: (str) a string representing indices. Can optionally be
      surrounded by a pair of brackets.

  Returns:
    (list of int): Parsed indices.
  """
  # Strip whitespace.
  indices_string = re.sub(r"\s+", "", indices_string)

  # Strip any brackets at the two ends.
  if indices_string.startswith("[") and indices_string.endswith("]"):
    indices_string = indices_string[1:-1]

  return [int(element) for element in indices_string.split(",")]


def parse_ranges(range_string):
  """Parse a string representing numerical range(s).

  Args:
    range_string: (str) A string representing a numerical range or a list of
      them. For example:
        "[-1.0,1.0]", "[-inf, 0]", "[[-inf, -1.0], [1.0, inf]]"

  Returns:
    (list of list of float) A list of numerical ranges parsed from the input
      string.

  Raises:
    ValueError: If the input doesn't represent a range or a list of ranges.
  """
  range_string = range_string.strip()
  if not range_string:
    return []

  if "inf" in range_string:
    range_string = re.sub(r"inf", repr(sys.float_info.max), range_string)

  ranges = ast.literal_eval(range_string)
  if isinstance(ranges, list) and not isinstance(ranges[0], list):
    ranges = [ranges]

  # Verify that ranges is a list of list of numbers.
  for item in ranges:
    if len(item) != 2:
      raise ValueError("Incorrect number of elements in range")
    elif not isinstance(item[0], (int, float)):
      raise ValueError("Incorrect type in the 1st element of range: %s" %
                       type(item[0]))
    elif not isinstance(item[1], (int, float)):
      # Bug fix: the original formatted type(item[0]) into this message,
      # misreporting the offending element's type.
      raise ValueError("Incorrect type in the 2nd element of range: %s" %
                       type(item[1]))

  return ranges


def parse_readable_size_str(size_str):
  """Convert a human-readable str representation to number of bytes.

  Only the units "kB", "MB", "GB" are supported. The "B character at the end
  of the input `str` may be omitted.

  Args:
    size_str: (`str`) A human-readable str representing a number of bytes
      (e.g., "0", "1023", "1.1kB", "24 MB", "23GB", "100 G".

  Returns:
    (`int`) The parsed number of bytes.

  Raises:
    ValueError: on failure to parse the input `size_str`.
  """
  size_str = size_str.strip()
  if size_str.endswith("B"):
    size_str = size_str[:-1]

  if size_str.isdigit():
    return int(size_str)
  elif size_str.endswith("k"):
    return int(float(size_str[:-1]) * 1024)
  elif size_str.endswith("M"):
    return int(float(size_str[:-1]) * 1048576)
  elif size_str.endswith("G"):
    return int(float(size_str[:-1]) * 1073741824)
  else:
    raise ValueError("Failed to parsed human-readable byte size str: \"%s\"" %
                     size_str)


def evaluate_tensor_slice(tensor, tensor_slicing):
  """Call eval on the slicing of a tensor, with validation.

  Args:
    tensor: (numpy ndarray) The tensor value.
    tensor_slicing: (str or None) Slicing of the tensor, e.g., "[:, 1]". If
      None, no slicing will be performed on the tensor.

  Returns:
    (numpy ndarray) The sliced tensor.

  Raises:
    ValueError: If tensor_slicing is not a valid numpy ndarray slicing str.
  """
  _ = tensor  # Used by the eval() call below.

  if not validate_slicing_string(tensor_slicing):
    raise ValueError("Invalid tensor-slicing string.")

  # The slicing string is validated above to contain only digits, commas,
  # colons, whitespace and brackets, which bounds what eval can execute.
  return eval("tensor" + tensor_slicing)  # pylint: disable=eval-used
apache-2.0
ctuning/ck-env
soft/compiler.llvm.android.ndk/customize.py
1
23467
#
# Collective Knowledge (individual environment - setup)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#

import os

##############################################################################
# customize directories to automatically find and register software

def dirs(i):
    """Return extra directories to scan for this software (Windows only)."""
    hosd=i['host_os_dict']
    phosd=hosd.get('ck_name','')
    dirs=i.get('dirs', [])
    if phosd=='win':
        win_dir = 'C:\\Users\\All Users\\Microsoft'
        if os.path.isdir(win_dir):
            dirs.append(win_dir)
    return {'return':0, 'dirs':dirs}

##############################################################################
# prepare env

def version_cmd(i):
    """Build the shell command used to query the compiler version,
       quoting the binary path if it contains spaces."""
    fp=i['full_path']
    cmdx=i['cmd']
    if ' ' in fp: fp='"'+fp+'"'
    cmd=fp+' '+cmdx
    return {'return':0, 'cmd':cmd}

##############################################################################
# limit directories

def limit(i):
    """Filter candidate paths down to the NDK prebuilt LLVM clang binaries
       that match the host OS/bitness."""
    hosd=i.get('host_os_dict',{})
    tosd=i.get('target_os_dict',{})

    phosd=hosd.get('ck_name','')
    macos=hosd.get('macos', '')
    hbits=hosd.get('bits','')
    tbits=tosd.get('bits','')

    long_os_name = 'windows' if phosd=='win' else ('darwin' if macos else 'linux')
    prebuilt = long_os_name + '-x86' + ('_64' if hbits=='64' else '')

    acp=tosd.get('android_compiler_prefix','')
    if acp=='':
        return {'return':1, 'error':'android_compiler_prefix is not specified in target OS meta'}

    atc=tosd.get('android_toolchain','')
    if atc=='':
        return {'return':1, 'error':'android_toolchain is not specified in target OS meta'}

    fn='clang'
    if phosd=='win': fn+='.exe'

    dr=i.get('list',[])
    drx=[]
    for q in dr:
        p0=os.path.dirname(q)
        p1=os.path.join(p0,'toolchains','llvm','prebuilt',prebuilt,'bin',fn)
        if os.path.isfile(p1):
            if p1 not in drx:
                drx.append(p1)

    return {'return':0, 'list':drx}

##############################################################################
# parse software version

def parse_version(i):
    """Extract the clang version string from the compiler's version output."""
    lst=i['output']

    ver=''
    for q in lst:
        q=q.strip().lower()
        if q!='':
            j=q.find(' version ')
            if j>0 and (q.startswith('clang') or q.startswith('android clang')):
                q=q[j+9:]
                j=q.find(' ')
                if j>0: q=q[:j]
                ver=q
                break
            j=q.find('clang version ')
            if j>0:
                j1=q.find(' ', j+14)
                if j1>0:
                    ver=q[j+14:j1].strip()
                    break

    if ver=='':
        # NOTE(review): 'ck' is not defined at module scope in this file;
        # presumably the CK framework injects it before calling parse_version —
        # verify, otherwise this warning path raises NameError.
        ck.out('')
        ck.out('  WARNING: can\'t detect clang version from the following output:')
        for q in lst:
            ck.out('    '+q)
        ck.out('')

    return {'return':0, 'version':ver}

##############################################################################
# setup environment setup

def setup(i):
    """
    Input:  {
              cfg              - meta of this soft entry
              self_cfg         - meta of module soft
              ck_kernel        - import CK kernel module (to reuse functions)

              host_os_uoa      - host OS UOA
              host_os_uid      - host OS UID
              host_os_dict     - host OS meta

              target_os_uoa    - target OS UOA
              target_os_uid    - target OS UID
              target_os_dict   - target OS meta

              target_device_id - target device ID (if via ADB)

              tags             - list of tags used to search this entry

              env              - updated environment vars from meta
              customize        - updated customize vars from meta

              deps             - resolved dependencies for this soft

              interactive      - if 'yes', can ask questions, otherwise quiet
            }

    Output: {
              return   - return code =  0, if successful
                                     >  0, if error
              (error)  - error text if return > 0

              bat      - prepared string for bat file
            }
    """

    # Get variables
    ck=i['ck_kernel']
    s=''

    fos=i.get('features',{}).get('os',{})
    os_name_long=fos.get('name_long','')

    iv=i.get('interactive','')

    env=i['env']
    cfg=i.get('cfg',{})
    deps=i.get('deps',{})
    tags=i['tags']
    cus=i['customize']

    hos=i['host_os_uid']
    # NOTE(review): 'tos' is assigned the HOST OS UID, not i['target_os_uid'].
    # This makes the path-conversion shell command below run on the host, which
    # may well be intentional — confirm before "fixing".
    tos=i['host_os_uid']
    tdid=i['target_device_id']

    hosd=i.get('host_os_dict',{})
    target_d=i.get('target_os_dict',{})

    winh=hosd.get('windows_base','')
    win=target_d.get('windows_base','')
    remote=target_d.get('remote','')
    mingw=target_d.get('mingw','')
    tbits=target_d.get('bits','')

    sdirs=hosd.get('dir_sep','')
    hplat=hosd.get('ck_name','')
    macos=hosd.get('macos','')

    envp=cus.get('env_prefix','')
    pi=cus.get('path_install','')

    fp=cus.get('full_path','')
    tp=''

    arch=target_d.get('android_ndk_arch','')

    # Check NDK: derive the NDK release number from the GCC dependency's sysroot path.
    ndk_gcc=deps.get('ndk-gcc', {})
    ndk_gcc_env=ndk_gcc.get('dict',{}).get('env',{})
    ndk=ndk_gcc_env.get('CK_SYS_ROOT','')
    ndk_ver=''
    ndk_iver=0
    j=ndk.find('android-ndk-r')
    if j>=0:
        j1=ndk.find('/', j+1)
        j2=ndk.find('\\', j+1)
        if j2>=0 and j1>j2: j1=j2
        ndk_ver=ndk[j+13:j1]
        if len(ndk_ver)==3:
            ndk_iver=ck.safe_int(ndk_ver[:2],0)

    # Need to check that if path has spaces on Windows, then convert to non-space format,
    # otherwise many issues with CMAKE ...
    if winh=='yes' and ' ' in fp:
        cmd='@for %%A in ("'+fp+'") do echo %%~sA'

        r=ck.access({'action':'shell',
                     'module_uoa':'os',
                     'host_os':hos,
                     'target_os':tos,
                     'device_id':tdid,
                     'cmd':cmd,
                     'split_to_list':'yes'})
        if r['return']>0: return r

        x=r['stdout_lst']
        if len(x)>2 and x[0]=='':
            y=x[2]
            if len(y)>0:
                pp1=os.path.dirname(fp)
                pp2=os.path.dirname(pp1)
                pp3=os.path.dirname(pp2)
                pp4=os.path.dirname(pp3)

                # since later will need real long name (to detect arch)
                fp=y

                ck.out('')
                ck.out('  Removed spaces from Windows path: '+fp)
                ck.out('')

    # Check path: register the compiler's bin dir and the C++ runtime libraries.
    ep=cus.get('env_prefix','')
    if ep!='' and fp!='':
        p1=os.path.dirname(fp)
        pi=os.path.dirname(p1)
        p2=os.path.dirname(pi)
        p3=os.path.dirname(p2)
        p4=os.path.dirname(p3)
        p5=os.path.dirname(p4)
        ndk_path=p5

        ver=ndk_gcc.get('ver', '')[:-2]
        abi=target_d.get('abi','')

        env[ep]=pi
        env[ep+'_BIN']=p1

        pxl_gnustl_static = os.path.join(ndk_path, 'sources', 'cxx-stl', 'gnu-libstdc++', ver, 'libs', abi, 'libgnustl_static.a')
        pxl_gnustl_shared = os.path.join(ndk_path, 'sources', 'cxx-stl', 'gnu-libstdc++', ver, 'libs', abi, 'libgnustl_shared.so')

        if ndk_iver>=17:
            ck.out('')
            ck.out('NDK version '+str(ndk_iver)+' >= 17 - using LLVM C++ library ...')

            pxi=os.path.join(ndk_path, 'sources', 'cxx-stl', 'llvm-libc++', 'include')
            if not os.path.isdir(pxi):
                return {'return':1, 'error':'LLVM C++ include path not found: '+pxi}
            env['CK_ENV_LIB_STDCPP_INCLUDE']=pxi

            pxai=os.path.join(ndk_path, 'sources', 'cxx-stl', 'llvm-libc++abi', 'include')
            if not os.path.isdir(pxai):
                return {'return':1, 'error':'LLVM C++ include path not found: '+pxai}
            env['CK_ENV_LIB_STDCPP_INCLUDE_EXTRA']=pxai

            pxl_static=os.path.join(ndk_path, 'sources', 'cxx-stl', 'llvm-libc++', 'libs', abi, 'libc++_static.a')
            if not os.path.isfile(pxl_static):
                return {'return':1, 'error':'LLVM C++ lib not found: '+pxl_static}

            pxla_static=os.path.join(ndk_path, 'sources', 'cxx-stl', 'llvm-libc++', 'libs', abi, 'libc++abi.a')
            if not os.path.isfile(pxla_static):
                return {'return':1, 'error':'LLVM C++ lib not found: '+pxla_static}

            env['CK_ENV_LIB_STDCPP_STATIC'] = pxl_static+' '+pxla_static

            pxl_shared=os.path.join(ndk_path, 'sources', 'cxx-stl', 'llvm-libc++', 'libs', abi, 'libc++_shared.so')
            if not os.path.isfile(pxl_shared):
                return {'return':1, 'error':'LLVM C++ lib not found: '+pxl_shared}

            env['CK_ENV_LIB_STDCPP_DYNAMIC'] = pxl_shared

            env['CK_ENV_LIB_GNUSTL_STATIC']  = pxl_gnustl_static
            env['CK_ENV_LIB_GNUSTL_DYNAMIC'] = pxl_gnustl_shared
        else:
            env['CK_ENV_LIB_STDCPP_INCLUDE']=os.path.join(ndk_path, 'sources', 'cxx-stl', 'gnu-libstdc++', ver, 'include')
            env['CK_ENV_LIB_STDCPP_INCLUDE_EXTRA']=os.path.join(ndk_path, 'sources', 'cxx-stl', 'gnu-libstdc++', ver, 'libs', abi, 'include')
            env['CK_ENV_LIB_STDCPP_STATIC'] = pxl_gnustl_static
            env['CK_ENV_LIB_STDCPP_DYNAMIC'] = pxl_gnustl_shared

        cus['path_lib']=pi+sdirs+'lib'
        cus['path_include']=pi+sdirs+'include'

#    if ndk_gcc_env.get('CK_ENV_LIB_STDCPP_STATIC','')!='':
#       env['CK_ENV_LIB_STDCPP_STATIC']=ndk_gcc_env['CK_ENV_LIB_STDCPP_STATIC']
#    if ndk_gcc_env.get('CK_ENV_LIB_STDCPP_DYNAMIC','')!='':
#       env['CK_ENV_LIB_STDCPP_DYNAMIC']=ndk_gcc_env['CK_ENV_LIB_STDCPP_DYNAMIC']
#    if ndk_gcc_env.get('CK_ENV_LIB_STDCPP_INCLUDE_EXTRA','')!='':
#       env['CK_ENV_LIB_STDCPP_INCLUDE_EXTRA']=ndk_gcc_env['CK_ENV_LIB_STDCPP_INCLUDE_EXTRA']

    # On Linux hosts, detect a versioned tool postfix (e.g. clang-3.6) from the binary name.
    if hplat=='linux':
        sname=cus.get('soft_file',{}).get(hplat,'')
        pname=os.path.basename(fp)
        if pname.startswith(sname+'-'):
            tp=pname[len(sname):]

    if cus.get('tool_prefix','')=='':
        cus['tool_prefix_configured']='yes'
        cus['tool_prefix']=''
    if cus.get('tool_postfix','')=='':
        cus['tool_postfix_configured']='yes'
        cus['tool_postfix']=tp
    if cus.get('retarget','')=='':
        cus['retarget']='no'

    ############################################################
    if winh=='yes':
        env.update({
            "CK_AFTER_COMPILE_TO_BC": "ren *.o *",
            "CK_ASM_EXT": ".s",
            "CK_BC_EXT": ".bc",
            "CK_COMPILER_ENABLE_EXCEPTIONS": "-fcxx-exceptions",
            "CK_CC": "$#tool_prefix#$clang",
            "CK_COMPILER_FLAG_CPP17": "-std=c++17",
            "CK_COMPILER_FLAG_CPP14": "-std=c++14",
            "CK_COMPILER_FLAG_CPP11": "-std=c++11",
            "CK_COMPILER_FLAG_CPP0X": "-std=c++0x",
            "CK_COMPILER_FLAG_OPENMP": "-fopenmp",
            "CK_COMPILER_FLAG_PTHREAD_LIB": "-lpthread",
            "CK_COMPILER_FLAG_STD90": "-std=c90",
            "CK_COMPILER_FLAG_STD99": "-std=c99",
            "CK_CXX": "$#tool_prefix#$clang++",
            "CK_F90": "",
            "CK_F95": "",
            "CK_FC": "",
            "CK_FLAGS_CREATE_ASM": "-S",
            "CK_FLAGS_CREATE_BC": "-c -emit-llvm",
            "CK_FLAGS_CREATE_OBJ": "-c",
            "CK_FLAGS_DYNAMIC_BIN": "",
            "CK_FLAGS_OUTPUT": "-o",
            "CK_FLAG_PREFIX_INCLUDE": "-I",
            "CK_FLAG_PREFIX_LIB_DIR": "-L",
            "CK_FLAG_PREFIX_VAR": "-D",
            "CK_LINKER_FLAG_OPENMP": "-fopenmp",
            "CK_MAKE": "nmake",
            "CK_OBJ_EXT": ".o",
            "CK_OPT_SIZE": "-Os",
            "CK_OPT_SPEED": "-O3",
            "CK_OPT_SPEED_SAFE": "-O2",
            "CK_PLUGIN_FLAG": "-fplugin=",
            "CM_INTERMEDIATE_OPT_TOOL": "opt",
            "CM_INTERMEDIATE_OPT_TOOL_OUT": "-o",
            "CK_FLAGS_DLL_NO_LIBCMT": " ",
        })

        # Modify if Android
        if remote=='yes':
            # Fix: the original dict literal listed "CK_LD_FLAGS_EXTRA" twice
            # with the same value; duplicates removed (no behavior change).
            env.update({
                "CK_AR": "%CK_ANDROID_COMPILER_PREFIX%-ar",
                "CK_COMPILER_FLAG_GPROF": "-pg",
                "CK_DLL_EXT": ".so",
                "CK_EXE_EXT": ".out",
                "CK_EXTRA_LIB_DL": "-ldl",
                "CK_EXTRA_LIB_M": "-lm",
                "CK_FLAGS_DLL": "-shared -fPIC",
                "CK_FLAGS_DLL_EXTRA": "",
                "CK_FLAGS_STATIC_BIN": "-static -fPIC",
                "CK_FLAGS_STATIC_LIB": "-fPIC",
                "CK_LB": "%CK_ANDROID_COMPILER_PREFIX%-ar rcs",
                "CK_LB_OUTPUT": "-o ",
                "CK_LD_FLAGS_EXTRA": "",
                "CK_LIB_EXT": ".a",
                "CK_LD_DYNAMIC_FLAGS": "",
                "CK_OBJDUMP": "%CK_ANDROID_COMPILER_PREFIX%-objdump -d",
                "CK_PROFILER": "gprof"})
        else:
            env.update({
                "CK_AR": "lib",
                "CK_DLL_EXT": ".dll",
                "CK_EXE_EXT": ".exe",
                "CK_EXTRA_LIB_DL": "",
                "CK_EXTRA_LIB_M": "",
                "CK_FLAGS_DLL": "",
                "CK_FLAGS_DLL_EXTRA": "-Xlinker /dll",
                "CK_FLAGS_STATIC_BIN": "-static -Wl,/LARGEADDRESSAWARE:NO",
                "CK_FLAGS_STATIC_LIB": "-fPIC",
                "CK_LB": "lib",
                "CK_LB_OUTPUT": "/OUT:",
                "CK_LD_DYNAMIC_FLAGS": "",
                "CK_LD_FLAGS_EXTRA": "",
                "CK_LIB_EXT": ".lib",
                "CK_OBJDUMP": "llvm-objdump -d"})

        prefix_configured=cus.get('tool_prefix_configured','')
        prefix=cus.get('tool_prefix','')
        if prefix!='':
            env['CK_COMPILER_PREFIX']=prefix
            cus['tool_prefix']=prefix
            cus['tool_prefix_configured']='yes'

        # Substitute the prefix placeholder in all env values.
        for k in env:
            v=env[k]
            v=v.replace('$#tool_prefix#$',prefix)
            env[k]=v

        retarget=cus.get('retarget','')
        lfr=cus.get('linking_for_retargeting','')

        if remote=='yes':
            ### Android target #########################################################
#           x=env.get('CK_COMPILER_FLAGS_OBLIGATORY','')
            y='-target %CK_ANDROID_TOOLCHAIN% -gcc-toolchain %CK_ENV_COMPILER_GCC% --sysroot=%CK_SYS_ROOT%'
            x=''
#           if arch=='arm64':
#              x='-fPIE -pie '
            x='-fPIE -pie '
            env["CK_COMPILER_FLAGS_OBLIGATORY"]=x+y
        else:
            env["CK_COMPILER_FLAGS_OBLIGATORY"]="-DWINDOWS"

        if retarget=='yes' and lfr!='':
            cus['linking_for_retargeting']=lfr
            env['CK_LD_FLAGS_EXTRA']=lfr

        add_m32=cus.get('add_m32','')
        if env.get('CK_COMPILER_ADD_M32','').lower()=='yes' or os.environ.get('CK_COMPILER_ADD_M32','').lower()=='yes':
            add_m32='yes'
            cus['add_m32']='yes'

#        if add_m32=='' and iv=='yes' and tbits=='32':
#           ra=ck.inp({'text':'Target OS is 32 bit. Add -m32 to compilation flags (y/N)? '})
#           x=ra['string'].strip().lower()
#           if x=='y' or x=='yes':
#              add_m32='yes'
#              cus['add_m32']='yes'

        x=env.get('CK_COMPILER_FLAGS_OBLIGATORY','')
        if remote!='yes':
            if x.find('-DWINDOWS')<0:
                x+=' -DWINDOWS'
            if tbits=='32' and add_m32=='yes' and x.find('-m32')<0:
                x+=' -m32'
            y=cus.get('add_to_ck_compiler_flags_obligatory','')
            if y!='' and x.find(y)<0:
                x+=' '+y
            y='-target i686-pc-windows-msvc'
            if tbits=='64':
                y='-target x86_64-pc-windows-msvc'
            if x.find(y)<0:
                x+=' '+y
            if mingw=='yes':
                env['CK_MAKE']='mingw32-make'
        env['CK_COMPILER_FLAGS_OBLIGATORY']=x

        x=env.get('CK_CXX','')
        if x!='' and x.find('-fpermissive')<0:
            x+=' -fpermissive'
            env['CK_CXX']=x

        x=cus.get('add_extra_path','')
        if x!='':
            s+='\nset PATH='+pi+x+';%PATH%\n\n'

    else:
        ### Linux Host #########################################################
        env.update({
            "CK_AR": "$#tool_prefix#$ar",
            "CK_ASM_EXT": ".s",
            "CK_CC": "$#tool_prefix#$clang$#tool_postfix#$",
            "CK_LLVM_CONFIG": "$#tool_prefix#$llvm-config$#tool_postfix#$",
            "CK_COMPILER_FLAGS_OBLIGATORY": "",
            "CK_COMPILER_ENABLE_EXCEPTIONS": "-fcxx-exceptions",
            "CK_COMPILER_FLAG_CPP17": "-std=c++17",
            "CK_COMPILER_FLAG_CPP14": "-std=c++14",
            "CK_COMPILER_FLAG_CPP11": "-std=c++11",
            "CK_COMPILER_FLAG_CPP0X": "-std=c++0x",
            "CK_COMPILER_FLAG_GPROF": "-pg",
            "CK_COMPILER_FLAG_OPENMP": "-fopenmp",
            "CK_COMPILER_FLAG_PLUGIN": "-fplugin=",
            "CK_COMPILER_FLAG_PTHREAD_LIB": "-lpthread",
            "CK_CXX": "$#tool_prefix#$clang++$#tool_postfix#$",
            "CK_DLL_EXT": ".so",
            "CK_EXE_EXT": ".out",
            "CK_EXTRA_LIB_DL": "-ldl",
            "CK_EXTRA_LIB_M": "-lm",
            "CK_FLAGS_CREATE_ASM": "-S",
            "CK_FLAGS_CREATE_OBJ": "-c",
            "CK_FLAGS_DLL": "-shared -fPIC",
            "CK_FLAGS_DLL_EXTRA": "",
            "CK_FLAGS_OUTPUT": "-o ",
            "CK_FLAGS_STATIC_BIN": "-static -fPIC",
            "CK_FLAGS_STATIC_LIB": "-fPIC",
            "CK_FLAG_PREFIX_INCLUDE": "-I",
            "CK_FLAG_PREFIX_LIB_DIR": "-L",
            "CK_FLAG_PREFIX_VAR": "-D",
            "CK_GPROF_OUT_FILE": "gmon.out",
            "CK_LD_FLAGS_EXTRA": "",
            "CK_LIB_EXT": ".a",
            "CK_LINKER_FLAG_OPENMP": "-fopenmp",
            "CK_MAKE": "make",
            "CK_OBJDUMP": "$#tool_prefix#$objdump -d",
            "CK_OBJ_EXT": ".o",
            "CK_OPT_SIZE": "-Os",
            "CK_OPT_SPEED": "-O3",
            "CK_OPT_SPEED_SAFE": "-O2",
            "CK_PLUGIN_FLAG": "-fplugin=",
            "CK_PROFILER": "gprof"
        })

        # Modify if Android
        if remote=='yes':
            # Fix: duplicated "CK_LD_FLAGS_EXTRA" key removed (same value, no behavior change).
            env.update({
                "CK_AR": "${CK_ANDROID_COMPILER_PREFIX}-ar",
                "CK_COMPILER_FLAG_GPROF": "-pg",
                "CK_DLL_EXT": ".so",
                "CK_EXE_EXT": ".out",
                "CK_EXTRA_LIB_DL": "-ldl",
                "CK_EXTRA_LIB_M": "-lm",
                "CK_FLAGS_DLL": "-shared -fPIC",
                "CK_FLAGS_DLL_EXTRA": "",
                "CK_FLAGS_STATIC_BIN": "-static -fPIC",
                "CK_FLAGS_STATIC_LIB": "-fPIC",
                "CK_LB": "${CK_ANDROID_COMPILER_PREFIX}-ar rcs",
                "CK_LB_OUTPUT": "-o ",
                "CK_LD_FLAGS_EXTRA": "",
                "CK_LIB_EXT": ".a",
                "CK_LD_DYNAMIC_FLAGS": "",
                "CK_OBJDUMP": "${CK_ANDROID_COMPILER_PREFIX}-objdump -d",
                "CK_PROFILER": "gprof"})
        elif macos=='yes':
            env["CK_LB"]="$#tool_prefix#$ar -rcs"
            env["CK_LB_OUTPUT"]=""
        else:
            env["CK_LB"]="$#tool_prefix#$ar rcs"
            env["CK_LB_OUTPUT"]="-o "

        # Ask a few more questions

        # (tool prefix)
        prefix_configured=cus.get('tool_prefix_configured','')
        prefix=cus.get('tool_prefix','')

        env['CK_COMPILER_PREFIX']=prefix
        cus['tool_prefix']=prefix
        cus['tool_prefix_configured']='yes'

        for k in env:
            v=env[k]
            v=v.replace('$#tool_prefix#$',prefix)
            env[k]=v

        # (tool postfix such as -3.6)
        postfix_configured=cus.get('tool_postfix_configured','')
        postfix=cus.get('tool_postfix','')
        if postfix_configured!='yes':
            ck.out('')
            ra=ck.inp({'text':'Input clang postfix if needed (for example, -3.6 for clang-3.6) or Enter to skip: '})
            postfix=ra['string'].strip()

        env['CK_COMPILER_POSTFIX']=postfix
        cus['tool_postfix']=postfix
        cus['tool_postfix_configured']='yes'

        for k in env:
            v=env[k]

            # Hack to check that sometimes clang++-3.x is not available
            if k=='CK_CXX':
                pxx=os.path.join(env.get(ep+'_BIN',''),v.replace('$#tool_postfix#$',postfix))
                if not os.path.isfile(pxx):
                    v=v.replace('$#tool_postfix#$','')

            v=v.replace('$#tool_postfix#$',postfix)
            env[k]=v

        retarget=cus.get('retarget','')
        lfr=cus.get('linking_for_retargeting','')

        if retarget=='yes' and lfr!='':
            cus['linking_for_retargeting']=lfr
            env['CK_LD_FLAGS_EXTRA']=lfr

        if remote=='yes':
            ### Android target #########################################################
#           x=env.get('CK_COMPILER_FLAGS_OBLIGATORY','')
            y='-target $CK_ANDROID_TOOLCHAIN -gcc-toolchain $CK_ENV_COMPILER_GCC --sysroot=$CK_SYS_ROOT'
            x=''
#           if arch=='arm64':
#              x='-fPIE -pie '
            x='-fPIE -pie '
            env["CK_COMPILER_FLAGS_OBLIGATORY"]=' '+x+y
        else:
            ### Linux Host #########################################################
            add_m32=cus.get('add_m32','')
            if add_m32=='' and iv=='yes' and tbits=='32':
                ra=ck.inp({'text':'Target OS is 32 bit. Add -m32 to compilation flags (y/N)? '})
                x=ra['string'].strip().lower()
                if x=='y' or x=='yes':
                    add_m32='yes'
                    cus['add_m32']='yes'

            x=env.get('CK_COMPILER_FLAGS_OBLIGATORY','')
            if tbits=='32' and add_m32=='yes' and x.find('-m32')<0:
                x+=' -m32'
            y=cus.get('add_to_ck_compiler_flags_obligatory','')
            if y!='' and x.find(y)<0:
                x+=' '+y
            env['CK_COMPILER_FLAGS_OBLIGATORY']=x

        x=cus.get('add_extra_path','')
        if x!='':
            s+='\nexport PATH='+pi+x+':%PATH%\n\n'

    # Starting from NDK v16 there is no more usr/include path under platform dir,
    # so we have to add it as explicit -Isysroot/usr/include under ndk root dir.
    sysroot_include_dir = os.path.join(ndk_path, 'sysroot', 'usr', 'include')
    env['CK_ENV_LIB_SYSROOT_INCLUDE']=sysroot_include_dir

    # Trying to form a correct ORDER of include directories to satisfy the #include_next mechanism:
    #
    env['CK_COMPILER_FLAGS_OBLIGATORY'] += ' -I'+env['CK_ENV_LIB_STDCPP_INCLUDE']+' -I'+env['CK_ENV_LIB_SYSROOT_INCLUDE']

    if os.path.isdir(sysroot_include_dir):
        asm_include_dirs = {
            'arm64':  'aarch64-linux-android',
            'arm':    'arm-linux-androideabi',
            'x86_64': 'x86_64-linux-android',
            'x86':    'i686-linux-android',
            'mips':   'mipsel-linux-android',
            'mips64': 'mips64el-linux-android',
        }
        if arch in asm_include_dirs:
            env['CK_COMPILER_FLAGS_OBLIGATORY'] += ' -I' + os.path.join(sysroot_include_dir, asm_include_dirs[arch])

    # Update global
    env['CK_COMPILER_TOOLCHAIN_NAME']='clang'
    env["CK_EXTRA_LIB_ATOMIC"]="-latomic"
    env['CK_HAS_OPENMP']='0'  # for now force not to use OpenMP - later should detect via version

    if remote=='yes' or os_name_long.find('-arm')>0:
        y='-mfloat-abi=hard'
        env.update({'CK_COMPILER_FLAG_MFLOAT_ABI_HARD': y})
#       x=env.get('CK_COMPILER_FLAGS_OBLIGATORY','')
#       if y not in x:
#          x+=' '+y
#          env["CK_COMPILER_FLAGS_OBLIGATORY"]=x

    # Otherwise may be problems on Windows during cross-compiling
    env['CK_OPT_UNWIND']=' '
    env['CK_FLAGS_DYNAMIC_BIN']=' '

    return {'return':0, 'bat':s}
bsd-3-clause
kblomqvist/yasha
yasha/cmsis.py
1
14079
""" The MIT License (MIT) Copyright (c) 2015-2021 Kim Blomqvist Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" from xml.etree import ElementTree class SVDFile(): """SVD File: Entry class to parse CMSIS-SVD file SVD = System View Description format CMSIS = Cortex Microcontroller Software Interface Standard Read more from http://www.keil.com/pack/doc/CMSIS/SVD/html/ """ def __init__(self, file): if isinstance(file, str): self.root = ElementTree.fromstring(file) else: tree = ElementTree.parse(file) self.root = tree.getroot() self.cpu = None self.device = None self.peripherals = [] self.peripherals_dict = {} # Lookup by peripheral name def parse(self): self.cpu = SvdCpu(self.root.find("cpu")) self.device = SvdDevice(self.root) derived_periphs = [] for elem in self.root.iter("peripheral"): periph = SvdPeripheral(elem, self.device) if periph.derivedFrom is not None: derived_periphs.append(periph.name) self.peripherals.append(periph) self.peripherals_dict[periph.name] = periph for periph in [self.peripherals_dict[name] for name in derived_periphs]: base = self.peripherals_dict[periph.derivedFrom] periph.inherit_from(base) class SvdElement(object): props = [] props_to_integer = [] props_to_boolean = [] def __init__(self, element=None, defaults={}, parent=None): if element is not None: self.from_element(element, defaults) if parent is not None: self.parent = parent def from_element(self, element, defaults={}): """Populate object variables from SVD element""" if isinstance(defaults, SvdElement): defaults = vars(defaults) for key in self.props: try: value = element.find(key).text except AttributeError: # Maybe it's attribute? 
default = defaults[key] if key in defaults else None value = element.get(key, default) if value is not None: if key in self.props_to_integer: try: value = int(value) except ValueError: # It has to be hex value = int(value, 16) elif key in self.props_to_boolean: value = value.lower() in ("yes", "true", "t", "1") setattr(self, key, value) def inherit_from(self, element): for key, value in vars(self).items(): if not value and key in vars(element): value = getattr(element, key) setattr(self, key, value) def copy(self): import copy return copy.copy(self) def __str__(self): from pprint import pformat return pformat(vars(self), indent=0) class SvdDevice(SvdElement): """SVD Devices element""" props = [ "schemaVersion", "vendor", "vendorID", "name", "series", "version", "description", "licenseText", "headerSystemFilename", "headerDefinitionsPrefix", "addressUnitBits", "width", "size", "access", "protection", "resetValue", "resetMask" ] props_to_integer = [ "width", "size", "resetValue", "resetMask", "addressUnitBits" ] class SvdCpu(SvdElement): """SVD CPU section""" props = [ "name", "revision", "endian", "mpuPresent", "fpuPresent", "fpuDP", "icachePresent", "dcachePresent", "itcmPresent", "dtcmPresent", "vtorPresent", "nvicPrioBits", "vendorSystickConfig", "deviceNumInterrupts" ] props_to_boolean = [ "mpuPresent", "fpuPresent", "fpuDP", "icachePresent", "dcachePresent", "itcmPresent", "dtcmPresent", "vtorPresent" ] class SvdPeripheral(SvdElement): """SVD Peripherals Level A peripheral is a named collection of registers. A peripheral is mapped to a defined base address within the device's address space. A peripheral allocates one or more exclusive address blocks relative to its base address, such that all described registers fit into the allocated address blocks. Allocated addresses without an associated register description are automatically considered reserved. The peripheral can be assigned to a group of peripherals and may be associated with one or more interrupts. 
""" props = [ "derivedFrom", "dim", "dimIncrement", "dimIndex", "dimName", "name", "version", "description", "alternatePeripheral", "groupName", "appendToName", "headerStructName", "disableCondition", "baseAddress", "size", "access", "protection", "resetValue", "resetMask" ] props_to_integer = [ "dim", "dimIncrement", "baseAddress", "size", "resetValue", "resetMask" ] def from_element(self, element, defaults={}): SvdElement.from_element(self, element, defaults) self.registers = [] self.interrupts = [] self.addressBlock = None try: for reg in element.find("registers"): if reg.tag == "cluster": self.registers.append(Cluster(reg, self, parent=self)) elif reg.tag == "register": reg = SvdRegister(reg, self, parent=self) self.registers.extend(reg.fold()) except TypeError: # element.findall() may return None pass try: for i in element.findall("interrupt"): self.interrupts.append(SvdInterrupt(i, parent=self)) except TypeError: # element.findall() may return None pass try: block = element.find("addressBlock") self.addressBlock = SvdAddressBlock(block, parent=self) except TypeError: pass class SvdRegister(SvdElement): """SVD Registers Level A register is a named, programmable resource that belongs to a peripheral. Registers are mapped to a defined address in the address space of the device. An address is specified relative to the peripheral base address. The description of a register documents the purpose and function of the resource. A debugger requires information about the permitted access to a resource as well as side effects triggered by read and write accesses respectively. 
""" props = [ "derivedFrom", "dim", "dimIncrement", "dimIndex", "dimName", "name", "displayName", "description", "alternateGroup", "alternateRegister", "addressOffset", "size", "access", "protection", "resetValue", "resetMask", "dataType", "modifiedWriteValues", "readAction" ] props_to_integer = [ "dim", "dimIncrement", "addressOffset", "size", "resetValue", "resetMask" ] def from_element(self, element, defaults={}): SvdElement.from_element(self, element, defaults) self.fields = [] if self.dim is not None: try: self.dimIndex = int(self.dimIndex) except ValueError: try: start, stop = self.dimIndex.split("-") start, stop = (int(start), int(stop)+1) self.dimIndex = list(range(start, stop)) except ValueError: self.dimIndex = self.dimIndex.split(",") try: for elem in element.find("fields"): field = SvdField(elem, self, parent=self) self.fields.append(field) except TypeError: # element.findall() may return None pass try: elem = element.find("writeConstraint") self.writeConstraint = SvdWriteConstraint(elem, parent=self) except TypeError: pass def fold(self): """Folds the Register in accordance with it's dimensions. If the register is dimensionless, the returned list just contains the register itself unchanged. In case the register name looks like a C array, the returned list contains the register itself, where nothing else than the '%s' placeholder in it's name has been replaced with value of the dim element. 
""" if self.dim is None: return [self] if self.name.endswith("[%s]"): # C array like self.name = self.name.replace("%s", str(self.dim)) return [self] registers = [] for offset, index in enumerate(self.dimIndex): reg = self.copy() reg.name = self.name.replace("%s", str(index)) reg.addressOffset += offset * reg.dimIncrement reg.fields = [field.copy() for field in reg.fields] for field in reg.fields: field.parent = reg reg.dim = reg.dimIndex = reg.dimIncrement = None # Dimensionless registers.append(reg) return registers class Cluster(SvdElement): """SVD Cluster extension level Cluster adds an optional sub-level within the CMSIS SVD registers level. A cluster describes a sequence of neighboring registers within a peripheral. """ props = [ "registers", "derivedFrom", "dim", "dimIncrement", "dimIndex", "name", "description", "alternateCluster", "headerStructName", "addressOffset" ] props_to_integer = ["addressOffset", "dim", "dimIncrement"] def from_element(self, element, defaults={}): SvdElement.from_element(self, element, {}) self.registers = [] # TODO: Should work like Register.to_array(), if there's self.dim self.name = self.name.replace("%s", str(self.dim)) try: for elem in element.findall("*"): if elem.tag == "cluster": # Cluster may include yet another cluster self.registers.append(Cluster(elem, defaults, parent=self)) elif elem.tag == "register": reg = SvdRegister(elem, defaults, parent=self) self.registers.extend(reg.fold()) except TypeError: # element.findall() may return None pass class SvdField(SvdElement): """SVD Fields level All fields of a register are enclosed between the <fields> opening and closing tags. 
""" props = [ "derivedFrom", "name", "description", "bitOffset", "bitWidth", "lsb", "msb", "bitRange", "access", "modifiedWriteValues", "readAction" ] props_to_integer = ["bitOffset", "bitWidth", "lsb", "msb"] def from_element(self, element, defaults={}): SvdElement.from_element(self, element, defaults) self.enumeratedValues = { "read": [], "write": [], "read-write": [], } if self.bitRange is not None: self.msb, self.lsb = self.bitRange[1:-1].split(":") self.msb = int(self.msb) self.lsb = int(self.lsb) self.bitOffset = self.lsb self.bitWidth = self.msb - self.lsb + 1 elif self.bitOffset is not None and self.bitWidth is not None: self.lsb = self.bitOffset self.msb = self.bitWidth + self.lsb self.bitRange = (self.msb, self.lsb) try: for e in element.findall("enumeratedValues"): try: usage = e.find("usage").text except AttributeError: usage = "read-write" for e in e.findall("enumeratedValue"): enum = SvdEnumeratedValue(e, {}, parent=self) self.enumeratedValues[usage].append(enum) except TypeError: # element.findall() may return None pass try: elem = element.find("writeConstraint") self.writeConstraint = SvdWriteConstraint(elem, parent=self) except TypeError: pass class SvdEnumeratedValue(SvdElement): """SVD Enumerated values Level The concept of enumerated values creates a map between unsigned integers and an identifier string. 
""" props = ["derivedFrom", "name", "description", "value", "isDefault"] props_to_integer = ["value"] class SvdInterrupt(SvdElement): props = ["name", "description", "value"] props_to_integer = ["value"] class SvdAddressBlock(SvdElement): props = ["addressBlock", "offset", "size", "usage", "protection"] props_to_integer = ["offset", "size"] class SvdWriteConstraint(SvdElement): props = ["writeAsRead", "useEnumeratedValues"] props_to_boolean = ["writeAsRead", "useEnumeratedValues"] def from_element(self, element, defaults={}): SvdElement.from_element(self, element, defaults) try: elem = element.find("range") minimum = elem.find("minimum").text maximum = elem.find("maximum").text self.range = (int(minimum), int(maximum)) except: # No range pass
mit
The01000001Team/PythonGroceryList
beautifulsoup4-4.4.1/doc.zh/source/conf.py
65
8200
# -*- coding: utf-8 -*- # # Beautiful Soup documentation build configuration file, created by # sphinx-quickstart on Thu Jan 26 11:22:55 2012. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Beautiful Soup' copyright = u'2012, Leonard Richardson' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '4' # The full version, including alpha/beta/rc tags. release = '4.2.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'BeautifulSoupdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). 
#latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'BeautifulSoup.tex', u'Beautiful Soup Documentation', u'Leonard Richardson', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'beautifulsoup', u'Beautiful Soup Documentation', [u'Leonard Richardson'], 1) ] # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'Beautiful Soup' epub_author = u'Leonard Richardson' epub_publisher = u'Leonard Richardson' epub_copyright = u'2012, Leonard Richardson' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. 
#epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True
mit
kernel-hut/android_kernel_asus_Z010D
Documentation/networking/cxacru-cf.py
14668
1626
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.

import struct
import sys


def main(infile=None, outfile=None):
    """Convert a packed list of le32 values into "index=value" pairs.

    :param infile: binary stream to read from (default: stdin). On
        Python 3 ``sys.stdin`` is a text stream, so its ``.buffer`` is
        used; ``struct.unpack`` requires bytes.
    :param outfile: text stream to write to (default: stdout).

    Writes one space-separated "hexindex=decimalvalue" pair per le32
    word, followed by a newline. Exits with status 1 on a short read.
    """
    if infile is None:
        # getattr fallback keeps Python 2 (no .buffer attribute) working.
        infile = getattr(sys.stdin, "buffer", sys.stdin)
    if outfile is None:
        outfile = sys.stdout

    i = 0
    while True:
        buf = infile.read(4)

        if len(buf) == 0:
            break
        elif len(buf) != 4:
            # Input was not a multiple of 4 bytes: terminate the output
            # line, report the short read and fail.
            outfile.write("\n")
            sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
            sys.exit(1)

        if i > 0:
            outfile.write(" ")
        # Index is printed in hex (matches the sysfs adsl_config format),
        # the value in decimal.
        outfile.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
        i += 1

    outfile.write("\n")


if __name__ == "__main__":
    main()
gpl-2.0
WafaaT/spark-tk
python/sparktk/frame/ops/rename_columns.py
14
2293
# vim: set encoding=utf-8

# Copyright (c) 2016 Intel Corporation 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sparktk.frame.schema
from sparktk.frame.schema import schema_to_python, schema_to_scala


def rename_columns(self, names):
    """
    Rename columns

    Parameters
    ----------

    :param names: (dict) Dictionary of old names to new names.

    :raises ValueError: If ``names`` is not a dict.
    :raises RuntimeError: If the frame's schema has not been defined.

    Examples
    --------

    Start with a frame with columns *Black* and *White*.

        <hide>
        >>> s = [('Black', unicode), ('White', unicode)]
        >>> rows = [["glass", "clear"],["paper","unclear"]]
        >>> my_frame = tc.frame.create(rows, s)
        -etc-
        </hide>

        >>> print my_frame.schema
        [('Black', <type 'unicode'>), ('White', <type 'unicode'>)]

    Rename the columns to *Mercury* and *Venus*:

        >>> my_frame.rename_columns({"Black": "Mercury", "White": "Venus"})

        >>> print my_frame.schema
        [(u'Mercury', <type 'unicode'>), (u'Venus', <type 'unicode'>)]

    """
    if not isinstance(names, dict):
        raise ValueError("Unsupported 'names' parameter type. Expected dictionary, but found %s." % type(names))
    if self.schema is None:
        # Error message fix: was "Unable rename column(s), ..."
        raise RuntimeError("Unable to rename column(s), because the frame's schema has not been defined.")
    if self._is_python:
        # Python-backed frame: rename via the scala schema helpers and
        # write the converted schema back onto the python frame.
        scala_rename_map = self._tc.jutils.convert.to_scala_map(names)
        scala_schema = schema_to_scala(self._tc.sc, self._python.schema)
        rename_scala_schema = scala_schema.renameColumns(scala_rename_map)
        self._python.schema = schema_to_python(self._tc.sc, rename_scala_schema)
    else:
        # Scala-backed frame: delegate directly to the scala frame.
        self._scala.renameColumns(self._tc.jutils.convert.to_scala_map(names))
apache-2.0
0dadj1an/mix
mix_backup/workspace/python_bundle_1_2/add_access_rule/addAccessRule.py
1
2674
# # addAccessRule.py # version 1.0 # # # This example demonstrates communication with Check Point Management server using Management API Library in Python. # The demonstrated commands are: # # 1. login # 2. adding an access rule to the top of Network layer # 3. publishing the changes # # Logout command is called automatically after the work with Management API Library is completed. # # written by: Check Point software technologies inc. # July 2016 # # cp_management_api is a package that handles the communication with the Check Point management server. from mgmt_api_lib import * # A package for reading passwords without displaying them on the console. import getpass import sys def main(): with APIClient() as client: server = raw_input("Enter server IP address or hostname: ") username = raw_input("Enter username: ") if sys.stdin.isatty(): password = getpass.getpass("Enter password: ") else: print "Attention! Your password will be shown on the screen!" password = raw_input("Enter password: ") ruleName = raw_input("Enter the name of the access rule: ") # # The API client, would look for the server's certificate SHA1 fingerprint in a file. # If the fingerprint is not found on the file, it will ask the user if he accepts the server's fingerprint. # In case the user does not accept the fingerprint, exit the program. if client.check_fingerprint(server) is False: print "Could not get the server's fingerprint - Check connectivity with the server." 
exit(1) # login to server: login_res = client.login(server, username, password) if login_res.success is False: print "Login failed: {}".format(login_res.error_message) exit(1) # add a rule to the top of the "Network" layer addRuleResponse = client.api_call("add-access-rule", {"name": ruleName, "layer" : "Network", "position" : "top"}); if addRuleResponse.success: print "The rule: '{}' has been added successfully".format(ruleName) # publish the result publishRes = client.api_call("publish", {}); if publishRes.success: print "The changes were published successfully." else: print "Failed to publish the changes." else: print "Failed to add the access-rule: '{}', Error: {}".format(ruleName, addRuleResponse.error_message) if __name__ == "__main__": main()
apache-2.0
hvanhovell/spark
examples/src/main/python/ml/estimator_transformer_param_example.py
123
3952
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Estimator Transformer Param Example. """ from __future__ import print_function # $example on$ from pyspark.ml.linalg import Vectors from pyspark.ml.classification import LogisticRegression # $example off$ from pyspark.sql import SparkSession if __name__ == "__main__": spark = SparkSession\ .builder\ .appName("EstimatorTransformerParamExample")\ .getOrCreate() # $example on$ # Prepare training data from a list of (label, features) tuples. training = spark.createDataFrame([ (1.0, Vectors.dense([0.0, 1.1, 0.1])), (0.0, Vectors.dense([2.0, 1.0, -1.0])), (0.0, Vectors.dense([2.0, 1.3, 1.0])), (1.0, Vectors.dense([0.0, 1.2, -0.5]))], ["label", "features"]) # Create a LogisticRegression instance. This instance is an Estimator. lr = LogisticRegression(maxIter=10, regParam=0.01) # Print out the parameters, documentation, and any default values. print("LogisticRegression parameters:\n" + lr.explainParams() + "\n") # Learn a LogisticRegression model. This uses the parameters stored in lr. model1 = lr.fit(training) # Since model1 is a Model (i.e., a transformer produced by an Estimator), # we can view the parameters it used during fit(). 
# This prints the parameter (name: value) pairs, where names are unique IDs for this # LogisticRegression instance. print("Model 1 was fit using parameters: ") print(model1.extractParamMap()) # We may alternatively specify parameters using a Python dictionary as a paramMap paramMap = {lr.maxIter: 20} paramMap[lr.maxIter] = 30 # Specify 1 Param, overwriting the original maxIter. paramMap.update({lr.regParam: 0.1, lr.threshold: 0.55}) # Specify multiple Params. # You can combine paramMaps, which are python dictionaries. paramMap2 = {lr.probabilityCol: "myProbability"} # Change output column name paramMapCombined = paramMap.copy() paramMapCombined.update(paramMap2) # Now learn a new model using the paramMapCombined parameters. # paramMapCombined overrides all parameters set earlier via lr.set* methods. model2 = lr.fit(training, paramMapCombined) print("Model 2 was fit using parameters: ") print(model2.extractParamMap()) # Prepare test data test = spark.createDataFrame([ (1.0, Vectors.dense([-1.0, 1.5, 1.3])), (0.0, Vectors.dense([3.0, 2.0, -0.1])), (1.0, Vectors.dense([0.0, 2.2, -1.5]))], ["label", "features"]) # Make predictions on test data using the Transformer.transform() method. # LogisticRegression.transform will only use the 'features' column. # Note that model2.transform() outputs a "myProbability" column instead of the usual # 'probability' column since we renamed the lr.probabilityCol parameter previously. prediction = model2.transform(test) result = prediction.select("features", "label", "myProbability", "prediction") \ .collect() for row in result: print("features=%s, label=%s -> prob=%s, prediction=%s" % (row.features, row.label, row.myProbability, row.prediction)) # $example off$ spark.stop()
apache-2.0
alexschiller/osf.io
osf/management/commands/migratedata.py
2
28919
from __future__ import unicode_literals import gc import importlib import sys import itertools from box import BoxClient from box import BoxClientException from bson import ObjectId from dropbox.client import DropboxClient from dropbox.rest import ErrorResponse from github3 import GitHubError from oauthlib.oauth2 import InvalidGrantError from addons.base.models import BaseOAuthNodeSettings from framework import encryption from osf.models import ExternalAccount from osf.models import OSFUser from addons.s3 import utils import ipdb from django.contrib.contenttypes.models import ContentType from django.core.management import BaseCommand from django.db import IntegrityError, connection, transaction from django.utils import timezone from framework.auth.core import User as MODMUser from framework.mongo import database from framework.transactions.context import transaction as modm_transaction from modularodm import Q as MQ from osf.models import NodeLog, PageCounter, Tag, UserActivityCounter from osf.models.base import Guid, GuidMixin, OptionalGuidMixin from osf.models.node import AbstractNode from osf.utils.order_apps import get_ordered_models from psycopg2._psycopg import AsIs from scripts.register_oauth_scopes import set_backend from typedmodels.models import TypedModel from addons.github.api import GitHubClient from website.files.models import StoredFileNode as MODMStoredFileNode from website.models import Guid as MODMGuid from website.models import Node as MODMNode import logging logger = logging.getLogger(__name__) encryption.encrypt = lambda x: x encryption.decrypt = lambda x: x def get_modm_model(django_model): module_path, model_name = django_model.modm_model_path.rsplit('.', 1) modm_module = importlib.import_module(module_path) return getattr(modm_module, model_name) def migrate_page_counters(page_size=20000): print('Starting {}...'.format(sys._getframe().f_code.co_name)) collection = database['pagecounters'] total = collection.count() count = 0 start_time = 
timezone.now() while count < total: with transaction.atomic(): django_objects = [] offset = count limit = (count + page_size) if (count + page_size) < total else total page_of_modm_objects = collection.find().sort('_id', 1)[offset:limit] for mongo_obj in page_of_modm_objects: django_objects.append(PageCounter(_id=mongo_obj['_id'], date=mongo_obj['date'], total=mongo_obj['total'], unique=mongo_obj['unique'])) count += 1 if count % page_size == 0 or count == total: page_finish_time = timezone.now() if (count - page_size) < 0: start = 0 else: start = count - page_size print('Saving {} {} through {}...'.format(PageCounter._meta.model.__name__, start, count)) saved_django_objects = PageCounter.objects.bulk_create(django_objects) print('Done with {} {} in {} seconds...'.format(len(saved_django_objects), PageCounter._meta.model.__name__, (timezone.now()-page_finish_time).total_seconds())) saved_django_objects = [] print('Took out {} trashes'.format(gc.collect())) total = None count = None print('Took out {} trashes'.format(gc.collect())) print('Finished {} in {} seconds...'.format(sys._getframe().f_code.co_name, (timezone.now()-start_time).total_seconds())) def migrate_user_activity_counters(page_size=20000): print('Starting {}...'.format(sys._getframe().f_code.co_name)) collection = database['useractivitycounters'] total = collection.count() count = 0 start_time = timezone.now() while count < total: with transaction.atomic(): django_objects = [] offset = count limit = (count + page_size) if (count + page_size) < total else total page_of_modm_objects = collection.find().sort('_id', 1)[offset:limit] for mongo_obj in page_of_modm_objects: django_objects.append(UserActivityCounter(_id=mongo_obj['_id'], date=mongo_obj['date'], total=mongo_obj['total'], action=mongo_obj['action'])) count += 1 if count % page_size == 0 or count == total: page_finish_time = timezone.now() if (count - page_size) < 0: start = 0 else: start = count - page_size print('Saving {} {} through 
{}...'.format(UserActivityCounter._meta.model.__name__, start, count)) saved_django_objects = UserActivityCounter.objects.bulk_create(django_objects) print('Done with {} {} in {} seconds...'.format(len(saved_django_objects), UserActivityCounter._meta.model.__name__, (timezone.now()-page_finish_time).total_seconds())) saved_django_objects = [] print('Took out {} trashes'.format(gc.collect())) total = None count = None print('Took out {} trashes'.format(gc.collect())) print('Finished {} in {} seconds...'.format(sys._getframe().f_code.co_name, (timezone.now()-start_time).total_seconds())) def make_guids(): print('Starting {}...'.format(sys._getframe().f_code.co_name)) guid_models = [model for model in get_ordered_models() if (issubclass(model, GuidMixin) or issubclass(model, OptionalGuidMixin)) and (not issubclass(model, AbstractNode) or model is AbstractNode)] with connection.cursor() as cursor: with transaction.atomic(): for model in guid_models: with transaction.atomic(): content_type = ContentType.objects.get_for_model(model) if issubclass(model, TypedModel): sql = """ INSERT INTO osf_guid ( _id, object_id, created, content_type_id ) SELECT DISTINCT ON (guid_string) guid, t.id, clock_timestamp(), t.content_type_pk FROM {}_{} AS t, UNNEST(t.guid_string) AS guid WHERE t.guid_string IS NOT NULL AND t.content_type_pk IS NOT NULL ORDER BY t.guid_string, type DESC; """.format(content_type.app_label, content_type.model) else: sql = """ INSERT INTO osf_guid ( _id, object_id, created, content_type_id ) SELECT guid, t.id, clock_timestamp(), t.content_type_pk FROM {}_{} AS t, UNNEST(t.guid_string) AS guid WHERE t.guid_string IS NOT NULL AND t.content_type_pk IS NOT NULL; """.format(content_type.app_label, content_type.model) print('Making guids for {}'.format(model._meta.model.__name__)) try: cursor.execute(sql) except IntegrityError as ex: ipdb.set_trace() guids = MODMGuid.find() guid_keys = guids.get_keys() orphaned_guids = [] for g in guids: try: # if it's one of the 
abandoned models add it to orphaned_guids if g.to_storage()['referent'][1] in ['dropboxfile', 'osfstorageguidfile', 'osfguidfile', 'githubguidfile', 'nodefile', 'boxfile', 'figshareguidfile', 's3guidfile', 'dataversefile']: orphaned_guids.append(unicode(g._id)) except TypeError: pass # orphaned_guids = [unicode(g._id) for g in guids if g is not None and g.to_storage() is not None and len(g.to_storage['referent']) > 0 and g.to_storage()['referent'][1] in ['dropboxfile', 'osfstorageguidfile', 'osfguidfile', 'githubguidfile', 'nodefile', 'boxfile', 'figshareguidfile', 's3guidfile', 'dataversefile']] # get all the guids in postgres existing_guids = Guid.objects.all().values_list('_id', flat=True) # subtract the orphaned guids from the guids in modm and from that subtract existing guids # that should give us the guids that are missing guids_to_make = (set(guid_keys) - set(orphaned_guids)) - set(existing_guids) print('{} MODM Guids, {} Orphaned Guids, {} Guids to Make, {} Existing guids'.format(len(guid_keys), len(orphaned_guids), len(guids_to_make), len(existing_guids))) from django.apps import apps model_names = {m._meta.model.__name__.lower(): m._meta.model for m in apps.get_models()} with ipdb.launch_ipdb_on_exception(): # loop through missing guids for guid in guids_to_make: # load them from modm guid_dict = MODMGuid.load(guid).to_storage() # if they don't have a referent toss them if guid_dict['referent'] is None: print('{} has no referent.'.format(guid)) continue # get the model string from the referent modm_model_string = guid_dict['referent'][1] if modm_model_string == 'user': modm_model_string = 'osfuser' # if the model string is in our list of models load it up if modm_model_string in model_names: referent_model = model_names[modm_model_string] else: # this filters out bad models, like osfstorageguidfile # but these should already be gone print('Couldn\'t find model for {}'.format(modm_model_string)) continue # get the id from the to_storage dictionary 
modm_model_id = guid_dict['_id'] # if it's something that should have a guid if issubclass(referent_model, GuidMixin) or issubclass(referent_model, OptionalGuidMixin): try: # find it's referent referent_instance = referent_model.objects.get(guid_string__contains=[modm_model_id.lower()]) except referent_model.DoesNotExist: print('Couldn\'t find referent for {}:{}'.format(referent_model._meta.model.__name__, modm_model_id)) continue else: # we shouldn't ever get here, bad data print('Found guid pointing at {} type, dropping it on the floor.'.format(modm_model_string)) continue # if we got a referent instance create the guid if referent_instance: Guid.objects.create(referent=referent_instance) else: print('{} {} didn\'t create a Guid'.format(referent_model._meta.model.__name__, modm_model_id)) print('Started creating blacklist orphaned guids.') with connection.cursor() as cursor: sql = """ INSERT INTO osf_blacklistguid (guid) VALUES %(guids)s ON CONFLICT DO NOTHING; """ params = ''.join(['(\'{}\'), '.format(og) for og in orphaned_guids])[0:-2] cursor.execute(sql, {'guids': AsIs(params)}) def save_bare_models(modm_queryset, django_model, page_size=20000): print('Starting {} on {}...'.format(sys._getframe().f_code.co_name, django_model._meta.model.__name__)) count = 0 total = modm_queryset.count() hashes = set() while count < total: with transaction.atomic(): django_objects = list() offset = count limit = (count + page_size) if (count + page_size) < total else total page_of_modm_objects = modm_queryset.sort('-_id')[offset:limit] if not hasattr(django_model, '_natural_key'): print('{} is missing a natural key!'.format(django_model._meta.model.__name__)) for modm_obj in page_of_modm_objects: django_instance = django_model.migrate_from_modm(modm_obj) if django_instance is None: count += 1 continue if django_instance._natural_key() is not None: # if there's a natural key if isinstance(django_instance._natural_key(), list): found = [] for nk in 
django_instance._natural_key(): if nk not in hashes: hashes.add(nk) else: found.append(nk) if not found: django_objects.append(django_instance) else: count += 1 print('{} with guids {} was already in hashes'.format(django_instance._meta.model.__name__, found)) continue else: if django_instance._natural_key() not in hashes: # and that natural key doesn't exist in hashes # add it to hashes and append the object hashes.add(django_instance._natural_key()) django_objects.append(django_instance) else: count += 1 continue else: django_objects.append(django_instance) count += 1 if count % page_size == 0 or count == total: page_finish_time = timezone.now() if (count - page_size) < 0: start = 0 else: start = count - page_size print( 'Saving {} {} through {}...'.format(django_model._meta.model.__name__, start, count)) saved_django_objects = django_model.objects.bulk_create(django_objects) print('Done with {} {} in {} seconds...'.format(len(saved_django_objects), django_model._meta.model.__name__, ( timezone.now() - page_finish_time).total_seconds())) modm_obj._cache.clear() modm_obj._object_cache.clear() saved_django_objects = [] page_of_modm_objects = [] print('Took out {} trashes'.format(gc.collect())) total = None count = None hashes = None print('Took out {} trashes'.format(gc.collect())) class DuplicateExternalAccounts(Exception): pass def save_bare_external_accounts(page_size=100000): from website.models import ExternalAccount as MODMExternalAccount def validate_box(external_account): client = BoxClient(external_account.oauth_key) try: client.get_user_info() except (BoxClientException, IndexError): return False return True def validate_dropbox(external_account): client = DropboxClient(external_account.oauth_key) try: client.account_info() except (ValueError, IndexError, ErrorResponse): return False return True def validate_github(external_account): client = GitHubClient(external_account=external_account) try: client.user() except (GitHubError, IndexError): return False 
return True def validate_googledrive(external_account): try: external_account.node_settings.fetch_access_token() except (InvalidGrantError, AttributeError): return False return True def validate_s3(external_account): if utils.can_list(external_account.oauth_key, external_account.oauth_secret): return True return False account_validators = dict( box=validate_box, dropbox=validate_dropbox, github=validate_github, googledrive=validate_googledrive, s3=validate_s3 ) django_model_classes_with_fk_to_external_account = BaseOAuthNodeSettings.__subclasses__() django_model_classes_with_m2m_to_external_account = [OSFUser] print('Starting save_bare_external_accounts...') start = timezone.now() external_accounts = MODMExternalAccount.find() accounts_by_provider = dict() for ea in external_accounts: provider_tuple = (ea.provider, str(ea.provider_id)) if provider_tuple in accounts_by_provider.keys(): accounts_by_provider[provider_tuple].append(ea) else: accounts_by_provider[provider_tuple] = [ea, ] bad_accounts = {k:v for k, v in accounts_by_provider.iteritems() if len(v) > 1} good_accounts = [v[0] for k, v in accounts_by_provider.iteritems() if len(v) == 1] for (provider, provider_id), providers_accounts in bad_accounts.iteritems(): good_provider_accounts = [] for modm_external_acct in providers_accounts: if account_validators[provider](modm_external_acct): logger.info('Account {} checks out as valid.'.format(modm_external_acct)) good_provider_accounts.append(modm_external_acct) if len(good_provider_accounts) > 1: raise DuplicateExternalAccounts('{} {} had {} good accounts.'.format(provider, provider_id, len(good_provider_accounts))) else: itertools.chain(good_accounts, good_provider_accounts) with transaction.atomic(): good_django_accounts = ExternalAccount.objects.bulk_create([ExternalAccount.migrate_from_modm(x) for x in good_accounts]) external_account_mapping = dict(ExternalAccount.objects.all().values_list('_id', 'id')) for (provider, provider_id), providers_accounts in 
bad_accounts.iteritems(): newest_modm_external_account_id = str(ObjectId.from_datetime(timezone.datetime(1970, 1, 1, tzinfo=timezone.UTC()))) modm_external_account_ids_to_replace = [] for modm_external_acct in providers_accounts: if ObjectId(modm_external_acct._id).generation_time > ObjectId(newest_modm_external_account_id).generation_time: modm_external_account_ids_to_replace.append(newest_modm_external_account_id) newest_modm_external_account_id = modm_external_acct._id ext = ExternalAccount.migrate_from_modm(MODMExternalAccount.load(newest_modm_external_account_id)) ext.save() external_account_mapping[newest_modm_external_account_id] = ext.id for modm_external_account_to_replace in modm_external_account_ids_to_replace: for django_model_class in django_model_classes_with_fk_to_external_account: if hasattr(django_model_class, 'modm_model_path'): modm_model_class = get_modm_model(django_model_class) matching_models = modm_model_class.find(MQ('external_account', 'eq', modm_external_account_to_replace)) django_model_class.objects.filter(_id__in=matching_models.get_keys()).update(external_account_id=external_account_mapping[newest_modm_external_account_id]) for django_model_class in django_model_classes_with_m2m_to_external_account: if hasattr(django_model_class, 'modm_model_path'): modm_model_class = get_modm_model(django_model_class) for model_guid in modm_model_class.find(MQ('external_accounts', 'eq', modm_external_account_to_replace)).get_keys(): django_model = django_model_class.objects.get(guids___id=model_guid) django_model.external_accounts.add(external_account_mapping[newest_modm_external_account_id]) def save_bare_system_tags(page_size=10000): print('Starting save_bare_system_tags...') start = timezone.now() things = list(MODMNode.find(MQ('system_tags', 'ne', [])).sort( '-_id')) + list(MODMUser.find(MQ('system_tags', 'ne', [])).sort( '-_id')) system_tag_ids = [] for thing in things: for system_tag in thing.system_tags: system_tag_ids.append(system_tag) 
unique_system_tag_ids = set(system_tag_ids) total = len(unique_system_tag_ids) system_tags = [] for system_tag_id in unique_system_tag_ids: system_tags.append(Tag(name=system_tag_id, system=True)) created_system_tags = Tag.objects.bulk_create(system_tags) print('MODM System Tags: {}'.format(total)) print('django system tags: {}'.format(Tag.objects.filter(system=True).count())) print('Done with {} in {} seconds...'.format( sys._getframe().f_code.co_name, (timezone.now() - start).total_seconds())) def register_nonexistent_models_with_modm(): """ There are guids refering to models that no longer exist. We can't delete the guids because then they could be regenerated. These models are registered so that anything at all will work. :return: """ class DropboxFile(MODMStoredFileNode): _primary_key = '_id' pass class OSFStorageGuidFile(MODMStoredFileNode): _primary_key = '_id' pass class OSFGuidFile(MODMStoredFileNode): _primary_key = '_id' pass class GithubGuidFile(MODMStoredFileNode): _primary_key = '_id' pass class NodeFile(MODMStoredFileNode): _primary_key = '_id' pass class BoxFile(MODMStoredFileNode): _primary_key = '_id' pass class FigShareGuidFile(MODMStoredFileNode): _primary_key = '_id' pass class S3GuidFile(MODMStoredFileNode): _primary_key = '_id' pass class DataverseFile(MODMStoredFileNode): _primary_key = '_id' pass DataverseFile.register_collection() NodeFile.register_collection() S3GuidFile.register_collection() FigShareGuidFile.register_collection() BoxFile.register_collection() GithubGuidFile.register_collection() OSFStorageGuidFile.register_collection() OSFGuidFile.register_collection() DropboxFile.register_collection() @modm_transaction() def merge_duplicate_users(): print('Starting {}...'.format(sys._getframe().f_code.co_name)) start = timezone.now() from framework.mongo.handlers import database duplicates = database.user.aggregate([ { "$group": { "_id": "$username", "ids": {"$addToSet": "$_id"}, "count": {"$sum": 1} } }, { "$match": { "count": {"$gt": 
1} } }, { "$sort": { "count": -1 } } ]).get('result') # [ # { # 'count': 5, # '_id': 'duplicated@username.com', # 'ids': [ # 'listo','fidst','hatma','tchth','euser','name!' # ] # } # ] print('Found {} duplicate usernames.'.format(len(duplicates))) for duplicate in duplicates: print('Found {} copies of {}'.format(len(duplicate.get('ids')), duplicate.get('_id'))) if duplicate.get('_id'): # _id is an email address, merge users keeping the one that was logged into last users = list(MODMUser.find(MQ('_id', 'in', duplicate.get('ids'))).sort('-last_login')) best_match = users.pop() for user in users: print('Merging user {} into user {}'.format(user._id, best_match._id)) best_match.merge_user(user) else: # _id is null, set all usernames to their guid users = MODMUser.find(MQ('_id', 'in', duplicate.get('ids'))) for user in users: print('Setting username for {}'.format(user._id)) user.username = user._id user.save() print('Done with {} in {} seconds...'.format( sys._getframe().f_code.co_name, (timezone.now() - start).total_seconds())) class Command(BaseCommand): help = 'Migrates data from tokumx to postgres' threads = 5 def add_arguments(self, parser): parser.add_argument('--nodelogs', action='store_true', help='Run nodelog migrations') parser.add_argument('--nodelogsguids', action='store_true', help='Run nodelog guid migrations') def handle(self, *args, **options): set_backend() # it's either this or catch the exception and put them in the blacklistguid table register_nonexistent_models_with_modm() models = get_ordered_models() # guids never, pls models.pop(models.index(Guid)) models.pop(models.index(ExternalAccount)) if not options['nodelogs'] and not options['nodelogsguids']: merge_duplicate_users() # merged users get blank usernames, running it twice fixes it. 
merge_duplicate_users() for django_model in models: if not options['nodelogs'] and not options['nodelogsguids'] and django_model is NodeLog: continue elif (options['nodelogs'] or options['nodelogsguids']) and django_model is not NodeLog: continue elif django_model is AbstractNode: continue if not hasattr(django_model, 'modm_model_path'): print('################################################\n' '{} doesn\'t have a modm_model_path\n' '################################################'.format( django_model._meta.model.__name__)) continue modm_model = get_modm_model(django_model) if isinstance(django_model.modm_query, dict): modm_queryset = modm_model.find(**django_model.modm_query) else: modm_queryset = modm_model.find(django_model.modm_query) with ipdb.launch_ipdb_on_exception(): if not options['nodelogsguids']: save_bare_models(modm_queryset, django_model, page_size=django_model.migration_page_size) modm_model._cache.clear() modm_model._object_cache.clear() print('Took out {} trashes'.format(gc.collect())) # Handle system tags, they're on nodes, they need a special migration if not options['nodelogs'] and not options['nodelogsguids']: with ipdb.launch_ipdb_on_exception(): save_bare_system_tags() make_guids() save_bare_external_accounts() migrate_page_counters() migrate_user_activity_counters()
apache-2.0
2014c2g3/cda0512
static/Brython3.1.1-20150328-091302/Lib/unittest/__init__.py
900
2718
""" Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's Smalltalk testing framework. This module contains the core framework classes that form the basis of specific test cases and suites (TestCase, TestSuite etc.), and also a text-based utility class for running the tests and reporting the results (TextTestRunner). Simple usage: import unittest class IntegerArithmeticTestCase(unittest.TestCase): def testAdd(self): ## test method names begin 'test*' self.assertEqual((1 + 2), 3) self.assertEqual(0 + 1, 1) def testMultiply(self): self.assertEqual((0 * 10), 0) self.assertEqual((5 * 8), 40) if __name__ == '__main__': unittest.main() Further information is available in the bundled documentation, and from http://docs.python.org/library/unittest.html Copyright (c) 1999-2003 Steve Purcell Copyright (c) 2003-2010 Python Software Foundation This module is free software, and you may redistribute it and/or modify it under the same terms as Python itself, so long as this copyright message and disclaimer are retained in their original form. IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
""" __all__ = ['TestResult', 'TestCase', 'TestSuite', 'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main', 'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless', 'expectedFailure', 'TextTestResult', 'installHandler', 'registerResult', 'removeResult', 'removeHandler'] # Expose obsolete functions for backwards compatibility __all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases']) __unittest = True from .result import TestResult from .case import (TestCase, FunctionTestCase, SkipTest, skip, skipIf, skipUnless, expectedFailure) from .suite import BaseTestSuite, TestSuite from .loader import (TestLoader, defaultTestLoader, makeSuite, getTestCaseNames, findTestCases) from .main import TestProgram, main from .runner import TextTestRunner, TextTestResult from .signals import installHandler, registerResult, removeResult, removeHandler # deprecated _TextTestResult = TextTestResult
agpl-3.0
markmc/oslo.messaging
oslo/messaging/notify/_impl_messaging.py
1
2131
# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from oslo import messaging from oslo.messaging.notify import notifier LOG = logging.getLogger(__name__) class MessagingDriver(notifier._Driver): """Send notifications using the 1.0 message format. This driver sends notifications over the configured messaging transport, but without any message envelope (also known as message format 1.0). This driver should only be used in cases where there are existing consumers deployed which do not support the 2.0 message format. """ def __init__(self, conf, topics, transport, envelope=False): super(MessagingDriver, self).__init__(conf, topics, transport) self.envelope = envelope def notify(self, ctxt, message, priority): priority = priority.lower() for topic in self.topics: target = messaging.Target(topic='%s.%s' % (topic, priority)) try: self.transport._send(target, ctxt, message, envelope=self.envelope) except Exception: LOG.exception("Could not send notification to %(topic)s. " "Payload=%(message)s", dict(topic=topic, message=message)) class MessagingV2Driver(MessagingDriver): "Send notifications using the 2.0 message format." def __init__(self, conf, **kwargs): super(MessagingV2Driver, self).__init__(conf, envelope=True, **kwargs)
apache-2.0
matmutant/sl4a
python/src/Lib/lib-tk/Tix.py
50
74119
# -*-mode: python; fill-column: 75; tab-width: 8; coding: iso-latin-1-unix -*- # # $Id: Tix.py 63487 2008-05-20 07:13:37Z georg.brandl $ # # Tix.py -- Tix widget wrappers. # # For Tix, see http://tix.sourceforge.net # # - Sudhir Shenoy (sshenoy@gol.com), Dec. 1995. # based on an idea of Jean-Marc Lugrin (lugrin@ms.com) # # NOTE: In order to minimize changes to Tkinter.py, some of the code here # (TixWidget.__init__) has been taken from Tkinter (Widget.__init__) # and will break if there are major changes in Tkinter. # # The Tix widgets are represented by a class hierarchy in python with proper # inheritance of base classes. # # As a result after creating a 'w = StdButtonBox', I can write # w.ok['text'] = 'Who Cares' # or w.ok['bg'] = w['bg'] # or even w.ok.invoke() # etc. # # Compare the demo tixwidgets.py to the original Tcl program and you will # appreciate the advantages. # from Tkinter import * from Tkinter import _flatten, _cnfmerge, _default_root # WARNING - TkVersion is a limited precision floating point number if TkVersion < 3.999: raise ImportError, "This version of Tix.py requires Tk 4.0 or higher" import _tkinter # If this fails your Python may not be configured for Tk # Some more constants (for consistency with Tkinter) WINDOW = 'window' TEXT = 'text' STATUS = 'status' IMMEDIATE = 'immediate' IMAGE = 'image' IMAGETEXT = 'imagetext' BALLOON = 'balloon' AUTO = 'auto' ACROSSTOP = 'acrosstop' # Some constants used by Tkinter dooneevent() TCL_DONT_WAIT = 1 << 1 TCL_WINDOW_EVENTS = 1 << 2 TCL_FILE_EVENTS = 1 << 3 TCL_TIMER_EVENTS = 1 << 4 TCL_IDLE_EVENTS = 1 << 5 TCL_ALL_EVENTS = 0 # BEWARE - this is implemented by copying some code from the Widget class # in Tkinter (to override Widget initialization) and is therefore # liable to break. import Tkinter, os # Could probably add this to Tkinter.Misc class tixCommand: """The tix commands provide access to miscellaneous elements of Tix's internal state and the Tix application context. 
Most of the information manipulated by these commands pertains to the application as a whole, or to a screen or display, rather than to a particular window. This is a mixin class, assumed to be mixed to Tkinter.Tk that supports the self.tk.call method. """ def tix_addbitmapdir(self, directory): """Tix maintains a list of directories under which the tix_getimage and tix_getbitmap commands will search for image files. The standard bitmap directory is $TIX_LIBRARY/bitmaps. The addbitmapdir command adds directory into this list. By using this command, the image files of an applications can also be located using the tix_getimage or tix_getbitmap command. """ return self.tk.call('tix', 'addbitmapdir', directory) def tix_cget(self, option): """Returns the current value of the configuration option given by option. Option may be any of the options described in the CONFIGURATION OPTIONS section. """ return self.tk.call('tix', 'cget', option) def tix_configure(self, cnf=None, **kw): """Query or modify the configuration options of the Tix application context. If no option is specified, returns a dictionary all of the available options. If option is specified with no value, then the command returns a list describing the one named option (this list will be identical to the corresponding sublist of the value returned if no option is specified). If one or more option-value pairs are specified, then the command modifies the given option(s) to have the given value(s); in this case the command returns an empty string. Option may be any of the configuration options. 
""" # Copied from Tkinter.py if kw: cnf = _cnfmerge((cnf, kw)) elif cnf: cnf = _cnfmerge(cnf) if cnf is None: cnf = {} for x in self.tk.split(self.tk.call('tix', 'configure')): cnf[x[0][1:]] = (x[0][1:],) + x[1:] return cnf if isinstance(cnf, StringType): x = self.tk.split(self.tk.call('tix', 'configure', '-'+cnf)) return (x[0][1:],) + x[1:] return self.tk.call(('tix', 'configure') + self._options(cnf)) def tix_filedialog(self, dlgclass=None): """Returns the file selection dialog that may be shared among different calls from this application. This command will create a file selection dialog widget when it is called the first time. This dialog will be returned by all subsequent calls to tix_filedialog. An optional dlgclass parameter can be passed to specified what type of file selection dialog widget is desired. Possible options are tix FileSelectDialog or tixExFileSelectDialog. """ if dlgclass is not None: return self.tk.call('tix', 'filedialog', dlgclass) else: return self.tk.call('tix', 'filedialog') def tix_getbitmap(self, name): """Locates a bitmap file of the name name.xpm or name in one of the bitmap directories (see the tix_addbitmapdir command above). By using tix_getbitmap, you can avoid hard coding the pathnames of the bitmap files in your application. When successful, it returns the complete pathname of the bitmap file, prefixed with the character '@'. The returned value can be used to configure the -bitmap option of the TK and Tix widgets. """ return self.tk.call('tix', 'getbitmap', name) def tix_getimage(self, name): """Locates an image file of the name name.xpm, name.xbm or name.ppm in one of the bitmap directories (see the addbitmapdir command above). If more than one file with the same name (but different extensions) exist, then the image type is chosen according to the depth of the X display: xbm images are chosen on monochrome displays and color images are chosen on color displays. 
By using tix_ getimage, you can advoid hard coding the pathnames of the image files in your application. When successful, this command returns the name of the newly created image, which can be used to configure the -image option of the Tk and Tix widgets. """ return self.tk.call('tix', 'getimage', name) def tix_option_get(self, name): """Gets the options manitained by the Tix scheme mechanism. Available options include: active_bg active_fg bg bold_font dark1_bg dark1_fg dark2_bg dark2_fg disabled_fg fg fixed_font font inactive_bg inactive_fg input1_bg input2_bg italic_font light1_bg light1_fg light2_bg light2_fg menu_font output1_bg output2_bg select_bg select_fg selector """ # could use self.tk.globalgetvar('tixOption', name) return self.tk.call('tix', 'option', 'get', name) def tix_resetoptions(self, newScheme, newFontSet, newScmPrio=None): """Resets the scheme and fontset of the Tix application to newScheme and newFontSet, respectively. This affects only those widgets created after this call. Therefore, it is best to call the resetoptions command before the creation of any widgets in a Tix application. The optional parameter newScmPrio can be given to reset the priority level of the Tk options set by the Tix schemes. Because of the way Tk handles the X option database, after Tix has been has imported and inited, it is not possible to reset the color schemes and font sets using the tix config command. Instead, the tix_resetoptions command must be used. """ if newScmPrio is not None: return self.tk.call('tix', 'resetoptions', newScheme, newFontSet, newScmPrio) else: return self.tk.call('tix', 'resetoptions', newScheme, newFontSet) class Tk(Tkinter.Tk, tixCommand): """Toplevel widget of Tix which represents mostly the main window of an application. 
It has an associated Tcl interpreter.""" def __init__(self, screenName=None, baseName=None, className='Tix'): Tkinter.Tk.__init__(self, screenName, baseName, className) tixlib = os.environ.get('TIX_LIBRARY') self.tk.eval('global auto_path; lappend auto_path [file dir [info nameof]]') if tixlib is not None: self.tk.eval('global auto_path; lappend auto_path {%s}' % tixlib) self.tk.eval('global tcl_pkgPath; lappend tcl_pkgPath {%s}' % tixlib) # Load Tix - this should work dynamically or statically # If it's static, tcl/tix8.1/pkgIndex.tcl should have # 'load {} Tix' # If it's dynamic under Unix, tcl/tix8.1/pkgIndex.tcl should have # 'load libtix8.1.8.3.so Tix' self.tk.eval('package require Tix') def destroy(self): # For safety, remove an delete_window binding before destroy self.protocol("WM_DELETE_WINDOW", "") Tkinter.Tk.destroy(self) # The Tix 'tixForm' geometry manager class Form: """The Tix Form geometry manager Widgets can be arranged by specifying attachments to other widgets. See Tix documentation for complete details""" def config(self, cnf={}, **kw): self.tk.call('tixForm', self._w, *self._options(cnf, kw)) form = config def __setitem__(self, key, value): Form.form(self, {key: value}) def check(self): return self.tk.call('tixForm', 'check', self._w) def forget(self): self.tk.call('tixForm', 'forget', self._w) def grid(self, xsize=0, ysize=0): if (not xsize) and (not ysize): x = self.tk.call('tixForm', 'grid', self._w) y = self.tk.splitlist(x) z = () for x in y: z = z + (self.tk.getint(x),) return z return self.tk.call('tixForm', 'grid', self._w, xsize, ysize) def info(self, option=None): if not option: return self.tk.call('tixForm', 'info', self._w) if option[0] != '-': option = '-' + option return self.tk.call('tixForm', 'info', self._w, option) def slaves(self): return map(self._nametowidget, self.tk.splitlist( self.tk.call( 'tixForm', 'slaves', self._w))) Tkinter.Widget.__bases__ = Tkinter.Widget.__bases__ + (Form,) class TixWidget(Tkinter.Widget): """A 
TixWidget class is used to package all (or most) Tix widgets. Widget initialization is extended in two ways: 1) It is possible to give a list of options which must be part of the creation command (so called Tix 'static' options). These cannot be given as a 'config' command later. 2) It is possible to give the name of an existing TK widget. These are child widgets created automatically by a Tix mega-widget. The Tk call to create these widgets is therefore bypassed in TixWidget.__init__ Both options are for use by subclasses only. """ def __init__ (self, master=None, widgetName=None, static_options=None, cnf={}, kw={}): # Merge keywords and dictionary arguments if kw: cnf = _cnfmerge((cnf, kw)) else: cnf = _cnfmerge(cnf) # Move static options into extra. static_options must be # a list of keywords (or None). extra=() # 'options' is always a static option if static_options: static_options.append('options') else: static_options = ['options'] for k,v in cnf.items()[:]: if k in static_options: extra = extra + ('-' + k, v) del cnf[k] self.widgetName = widgetName Widget._setup(self, master, cnf) # If widgetName is None, this is a dummy creation call where the # corresponding Tk widget has already been created by Tix if widgetName: self.tk.call(widgetName, self._w, *extra) # Non-static options - to be done via a 'config' command if cnf: Widget.config(self, cnf) # Dictionary to hold subwidget names for easier access. We can't # use the children list because the public Tix names may not be the # same as the pathname component self.subwidget_list = {} # We set up an attribute access function so that it is possible to # do w.ok['text'] = 'Hello' rather than w.subwidget('ok')['text'] = 'Hello' # when w is a StdButtonBox. 
# We can even do w.ok.invoke() because w.ok is subclassed from the # Button class if you go through the proper constructors def __getattr__(self, name): if self.subwidget_list.has_key(name): return self.subwidget_list[name] raise AttributeError, name def set_silent(self, value): """Set a variable without calling its action routine""" self.tk.call('tixSetSilent', self._w, value) def subwidget(self, name): """Return the named subwidget (which must have been created by the sub-class).""" n = self._subwidget_name(name) if not n: raise TclError, "Subwidget " + name + " not child of " + self._name # Remove header of name and leading dot n = n[len(self._w)+1:] return self._nametowidget(n) def subwidgets_all(self): """Return all subwidgets.""" names = self._subwidget_names() if not names: return [] retlist = [] for name in names: name = name[len(self._w)+1:] try: retlist.append(self._nametowidget(name)) except: # some of the widgets are unknown e.g. border in LabelFrame pass return retlist def _subwidget_name(self,name): """Get a subwidget name (returns a String, not a Widget !)""" try: return self.tk.call(self._w, 'subwidget', name) except TclError: return None def _subwidget_names(self): """Return the name of all subwidgets.""" try: x = self.tk.call(self._w, 'subwidgets', '-all') return self.tk.split(x) except TclError: return None def config_all(self, option, value): """Set configuration options for all subwidgets (and self).""" if option == '': return elif not isinstance(option, StringType): option = repr(option) if not isinstance(value, StringType): value = repr(value) names = self._subwidget_names() for name in names: self.tk.call(name, 'configure', '-' + option, value) # These are missing from Tkinter def image_create(self, imgtype, cnf={}, master=None, **kw): if not master: master = Tkinter._default_root if not master: raise RuntimeError, 'Too early to create image' if kw and cnf: cnf = _cnfmerge((cnf, kw)) elif kw: cnf = kw options = () for k, v in cnf.items(): if 
callable(v): v = self._register(v) options = options + ('-'+k, v) return master.tk.call(('image', 'create', imgtype,) + options) def image_delete(self, imgname): try: self.tk.call('image', 'delete', imgname) except TclError: # May happen if the root was destroyed pass # Subwidgets are child widgets created automatically by mega-widgets. # In python, we have to create these subwidgets manually to mirror their # existence in Tk/Tix. class TixSubWidget(TixWidget): """Subwidget class. This is used to mirror child widgets automatically created by Tix/Tk as part of a mega-widget in Python (which is not informed of this)""" def __init__(self, master, name, destroy_physically=1, check_intermediate=1): if check_intermediate: path = master._subwidget_name(name) try: path = path[len(master._w)+1:] plist = path.split('.') except: plist = [] if not check_intermediate: # immediate descendant TixWidget.__init__(self, master, None, None, {'name' : name}) else: # Ensure that the intermediate widgets exist parent = master for i in range(len(plist) - 1): n = '.'.join(plist[:i+1]) try: w = master._nametowidget(n) parent = w except KeyError: # Create the intermediate widget parent = TixSubWidget(parent, plist[i], destroy_physically=0, check_intermediate=0) # The Tk widget name is in plist, not in name if plist: name = plist[-1] TixWidget.__init__(self, parent, None, None, {'name' : name}) self.destroy_physically = destroy_physically def destroy(self): # For some widgets e.g., a NoteBook, when we call destructors, # we must be careful not to destroy the frame widget since this # also destroys the parent NoteBook thus leading to an exception # in Tkinter when it finally calls Tcl to destroy the NoteBook for c in self.children.values(): c.destroy() if self.master.children.has_key(self._name): del self.master.children[self._name] if self.master.subwidget_list.has_key(self._name): del self.master.subwidget_list[self._name] if self.destroy_physically: # This is bypassed only for a few 
widgets self.tk.call('destroy', self._w) # Useful func. to split Tcl lists and return as a dict. From Tkinter.py def _lst2dict(lst): dict = {} for x in lst: dict[x[0][1:]] = (x[0][1:],) + x[1:] return dict # Useful class to create a display style - later shared by many items. # Contributed by Steffen Kremser class DisplayStyle: """DisplayStyle - handle configuration options shared by (multiple) Display Items""" def __init__(self, itemtype, cnf={}, **kw): master = _default_root # global from Tkinter if not master and cnf.has_key('refwindow'): master=cnf['refwindow'] elif not master and kw.has_key('refwindow'): master= kw['refwindow'] elif not master: raise RuntimeError, "Too early to create display style: no root window" self.tk = master.tk self.stylename = self.tk.call('tixDisplayStyle', itemtype, *self._options(cnf,kw) ) def __str__(self): return self.stylename def _options(self, cnf, kw): if kw and cnf: cnf = _cnfmerge((cnf, kw)) elif kw: cnf = kw opts = () for k, v in cnf.items(): opts = opts + ('-'+k, v) return opts def delete(self): self.tk.call(self.stylename, 'delete') def __setitem__(self,key,value): self.tk.call(self.stylename, 'configure', '-%s'%key, value) def config(self, cnf={}, **kw): return _lst2dict( self.tk.split( self.tk.call( self.stylename, 'configure', *self._options(cnf,kw)))) def __getitem__(self,key): return self.tk.call(self.stylename, 'cget', '-%s'%key) ###################################################### ### The Tix Widget classes - in alphabetical order ### ###################################################### class Balloon(TixWidget): """Balloon help widget. 
class ButtonBox(TixWidget):
    """ButtonBox - A container for pushbuttons.
    Subwidgets are the buttons added with the add method.
    """

    def __init__(self, master=None, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixButtonBox',
                           ['orientation', 'options'], cnf, kw)

    def add(self, name, cnf={}, **kw):
        """Add a button with given name to box."""
        btn = self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
        self.subwidget_list[name] = _dummyButton(self, name)
        return btn

    def invoke(self, name):
        """Invoke the named button, if it has been added to the box."""
        # Membership test instead of the Python-2-only dict.has_key();
        # behavior is identical.
        if name in self.subwidget_list:
            self.tk.call(self._w, 'invoke', name)
Subwidget Class --------- ----- entry Entry arrow Button slistbox ScrolledListBox tick Button cross Button : present if created with the fancy option""" # FIXME: It should inherit -superclass tixLabelWidget def __init__ (self, master=None, cnf={}, **kw): TixWidget.__init__(self, master, 'tixComboBox', ['editable', 'dropdown', 'fancy', 'options'], cnf, kw) self.subwidget_list['label'] = _dummyLabel(self, 'label') self.subwidget_list['entry'] = _dummyEntry(self, 'entry') self.subwidget_list['arrow'] = _dummyButton(self, 'arrow') self.subwidget_list['slistbox'] = _dummyScrolledListBox(self, 'slistbox') try: self.subwidget_list['tick'] = _dummyButton(self, 'tick') self.subwidget_list['cross'] = _dummyButton(self, 'cross') except TypeError: # unavailable when -fancy not specified pass # align def add_history(self, str): self.tk.call(self._w, 'addhistory', str) def append_history(self, str): self.tk.call(self._w, 'appendhistory', str) def insert(self, index, str): self.tk.call(self._w, 'insert', index, str) def pick(self, index): self.tk.call(self._w, 'pick', index) class Control(TixWidget): """Control - An entry field with value change arrows. The user can adjust the value by pressing the two arrow buttons or by entering the value directly into the entry. The new value will be checked against the user-defined upper and lower limits. 
class DirList(TixWidget):
    """DirList - displays a list view of a directory, its previous
    directories and its sub-directories. The user can choose one of
    the directories displayed in the list or change to another directory.

    Subwidget       Class
    ---------       -----
    hlist           HList
    hsb             Scrollbar
    vsb             Scrollbar"""

    # FIXME: It should inherit -superclass tixScrolledHList
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixDirList', ['options'], cnf, kw)
        # Mirror the subwidgets Tix creates automatically for this mega-widget.
        self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
        self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
        self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')

    def chdir(self, dir):
        # Ask the Tcl widget to display the given directory.
        self.tk.call(self._w, 'chdir', dir)
class DirSelectBox(TixWidget):
    """DirSelectBox - Motif style directory select box.

    It is generally used for the user to choose a directory.
    Previously chosen directories are kept in the combo box so that
    they can be quickly selected again.

    Subwidget       Class
    ---------       -----
    dirlist         DirList
    dircbx          ComboBox"""

    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixDirSelectBox', ['options'], cnf, kw)
        # Note: only these two subwidgets are exposed by tixDirSelectBox.
        self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
        self.subwidget_list['dircbx'] = _dummyFileComboBox(self, 'dircbx')
# Should inherit from a Dialog class
class DirSelectDialog(TixWidget):
    """The DirSelectDialog widget presents the directories in the file
    system in a dialog window. The user can use this dialog window to
    navigate through the file system to select the desired directory.

    Subwidgets      Class
    ----------      -----
    dirbox          DirSelectBox"""

    # FIXME: It should inherit -superclass tixDialogShell
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixDirSelectDialog',
                           ['options'], cnf, kw)
        self.subwidget_list['dirbox'] = _dummyDirSelectBox(self, 'dirbox')
        # cancel and ok buttons are missing

    def popup(self):
        # Display the dialog window.
        self.tk.call(self._w, 'popup')

    def popdown(self):
        # Withdraw the dialog window without destroying it.
        self.tk.call(self._w, 'popdown')
class FileSelectBox(TixWidget):
    """FileSelectBox - Motif style file select box.
    It is generally used for the user to choose a file. FileSelectBox
    stores the files mostly recently selected into a ComboBox widget
    so that they can be quickly selected again.

    Subwidget       Class
    ---------       -----
    selection       ComboBox
    filter          ComboBox
    dirlist         ScrolledListBox
    filelist        ScrolledListBox"""

    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixFileSelectBox', ['options'], cnf, kw)
        self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
        self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
        self.subwidget_list['filter'] = _dummyComboBox(self, 'filter')
        self.subwidget_list['selection'] = _dummyComboBox(self, 'selection')

    def apply_filter(self):              # name of subwidget is same as command
        # Re-apply the current filter pattern to the file list.
        self.tk.call(self._w, 'filter')

    def invoke(self):
        self.tk.call(self._w, 'invoke')
class FileEntry(TixWidget):
    """FileEntry - Entry field with button that invokes a FileSelectDialog.
    The user can type in the filename manually. Alternatively, the user can
    press the button widget that sits next to the entry, which will bring
    up a file selection dialog.

    Subwidgets      Class
    ----------      -----
    button          Button
    entry           Entry"""

    # FIXME: It should inherit -superclass tixLabelWidget
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixFileEntry',
                           ['dialogtype', 'options'], cnf, kw)
        self.subwidget_list['button'] = _dummyButton(self, 'button')
        self.subwidget_list['entry'] = _dummyEntry(self, 'entry')

    def invoke(self):
        self.tk.call(self._w, 'invoke')

    def file_dialog(self):
        # FIXME: return python object
        # Intentionally a stub: the wrapper for the associated dialog
        # has never been implemented.
        pass
Subwidgets - None""" def __init__ (self,master=None,cnf={}, **kw): TixWidget.__init__(self, master, 'tixHList', ['columns', 'options'], cnf, kw) def add(self, entry, cnf={}, **kw): return self.tk.call(self._w, 'add', entry, *self._options(cnf, kw)) def add_child(self, parent=None, cnf={}, **kw): if not parent: parent = '' return self.tk.call( self._w, 'addchild', parent, *self._options(cnf, kw)) def anchor_set(self, entry): self.tk.call(self._w, 'anchor', 'set', entry) def anchor_clear(self): self.tk.call(self._w, 'anchor', 'clear') def column_width(self, col=0, width=None, chars=None): if not chars: return self.tk.call(self._w, 'column', 'width', col, width) else: return self.tk.call(self._w, 'column', 'width', col, '-char', chars) def delete_all(self): self.tk.call(self._w, 'delete', 'all') def delete_entry(self, entry): self.tk.call(self._w, 'delete', 'entry', entry) def delete_offsprings(self, entry): self.tk.call(self._w, 'delete', 'offsprings', entry) def delete_siblings(self, entry): self.tk.call(self._w, 'delete', 'siblings', entry) def dragsite_set(self, index): self.tk.call(self._w, 'dragsite', 'set', index) def dragsite_clear(self): self.tk.call(self._w, 'dragsite', 'clear') def dropsite_set(self, index): self.tk.call(self._w, 'dropsite', 'set', index) def dropsite_clear(self): self.tk.call(self._w, 'dropsite', 'clear') def header_create(self, col, cnf={}, **kw): self.tk.call(self._w, 'header', 'create', col, *self._options(cnf, kw)) def header_configure(self, col, cnf={}, **kw): if cnf is None: return _lst2dict( self.tk.split( self.tk.call(self._w, 'header', 'configure', col))) self.tk.call(self._w, 'header', 'configure', col, *self._options(cnf, kw)) def header_cget(self, col, opt): return self.tk.call(self._w, 'header', 'cget', col, opt) def header_exists(self, col): return self.tk.call(self._w, 'header', 'exists', col) def header_delete(self, col): self.tk.call(self._w, 'header', 'delete', col) def header_size(self, col): return self.tk.call(self._w, 
'header', 'size', col) def hide_entry(self, entry): self.tk.call(self._w, 'hide', 'entry', entry) def indicator_create(self, entry, cnf={}, **kw): self.tk.call( self._w, 'indicator', 'create', entry, *self._options(cnf, kw)) def indicator_configure(self, entry, cnf={}, **kw): if cnf is None: return _lst2dict( self.tk.split( self.tk.call(self._w, 'indicator', 'configure', entry))) self.tk.call( self._w, 'indicator', 'configure', entry, *self._options(cnf, kw)) def indicator_cget(self, entry, opt): return self.tk.call(self._w, 'indicator', 'cget', entry, opt) def indicator_exists(self, entry): return self.tk.call (self._w, 'indicator', 'exists', entry) def indicator_delete(self, entry): self.tk.call(self._w, 'indicator', 'delete', entry) def indicator_size(self, entry): return self.tk.call(self._w, 'indicator', 'size', entry) def info_anchor(self): return self.tk.call(self._w, 'info', 'anchor') def info_children(self, entry=None): c = self.tk.call(self._w, 'info', 'children', entry) return self.tk.splitlist(c) def info_data(self, entry): return self.tk.call(self._w, 'info', 'data', entry) def info_exists(self, entry): return self.tk.call(self._w, 'info', 'exists', entry) def info_hidden(self, entry): return self.tk.call(self._w, 'info', 'hidden', entry) def info_next(self, entry): return self.tk.call(self._w, 'info', 'next', entry) def info_parent(self, entry): return self.tk.call(self._w, 'info', 'parent', entry) def info_prev(self, entry): return self.tk.call(self._w, 'info', 'prev', entry) def info_selection(self): c = self.tk.call(self._w, 'info', 'selection') return self.tk.splitlist(c) def item_cget(self, entry, col, opt): return self.tk.call(self._w, 'item', 'cget', entry, col, opt) def item_configure(self, entry, col, cnf={}, **kw): if cnf is None: return _lst2dict( self.tk.split( self.tk.call(self._w, 'item', 'configure', entry, col))) self.tk.call(self._w, 'item', 'configure', entry, col, *self._options(cnf, kw)) def item_create(self, entry, col, cnf={}, 
class InputOnly(TixWidget):
    """InputOnly - Invisible widget. Unix only.

    Subwidgets - None"""

    def __init__ (self,master=None,cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixInputOnly', None, cnf, kw)

class LabelEntry(TixWidget):
    """LabelEntry - Entry field with label. Packages an entry widget
    and a label into one mega widget. It can be used to simplify the
    creation of ``entry-form'' type of interface.

    Subwidgets      Class
    ----------      -----
    label           Label
    entry           Entry"""

    def __init__ (self,master=None,cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixLabelEntry',
                           ['labelside','options'], cnf, kw)
        # Mirror the subwidgets Tix creates automatically.
        self.subwidget_list['label'] = _dummyLabel(self, 'label')
        self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
class ListNoteBook(TixWidget):
    """A ListNoteBook widget is very similar to the TixNoteBook widget:
    it can be used to display many windows in a limited space using a
    notebook metaphor. The notebook is divided into a stack of pages
    (windows). At one time only one of these pages can be shown.
    The user can navigate through these pages by
    choosing the name of the desired page in the hlist subwidget."""

    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixListNoteBook', ['options'], cnf, kw)
        # Is this necessary? It's not an exposed subwidget in Tix.
        self.subwidget_list['pane'] = _dummyPanedWindow(self, 'pane',
                                                        destroy_physically=0)
        self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
        self.subwidget_list['shlist'] = _dummyScrolledHList(self, 'shlist')

    def add(self, name, cnf={}, **kw):
        # Create a new page and return its subwidget wrapper.
        self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
        self.subwidget_list[name] = TixSubWidget(self, name)
        return self.subwidget_list[name]

    def page(self, name):
        return self.subwidget(name)

    def pages(self):
        # Can't call subwidgets_all directly because we don't want .nbframe
        names = self.tk.split(self.tk.call(self._w, 'pages'))
        ret = []
        for x in names:
            ret.append(self.subwidget(x))
        return ret

    def raise_page(self, name):              # raise is a python keyword
        self.tk.call(self._w, 'raise', name)
""" def __init__(self, master=None, cnf={}, **kw): TixWidget.__init__(self, master, 'tixMeter', ['options'], cnf, kw) class NoteBook(TixWidget): """NoteBook - Multi-page container widget (tabbed notebook metaphor). Subwidgets Class ---------- ----- nbframe NoteBookFrame <pages> page widgets added dynamically with the add method""" def __init__ (self,master=None,cnf={}, **kw): TixWidget.__init__(self,master,'tixNoteBook', ['options'], cnf, kw) self.subwidget_list['nbframe'] = TixSubWidget(self, 'nbframe', destroy_physically=0) def add(self, name, cnf={}, **kw): self.tk.call(self._w, 'add', name, *self._options(cnf, kw)) self.subwidget_list[name] = TixSubWidget(self, name) return self.subwidget_list[name] def delete(self, name): self.tk.call(self._w, 'delete', name) self.subwidget_list[name].destroy() del self.subwidget_list[name] def page(self, name): return self.subwidget(name) def pages(self): # Can't call subwidgets_all directly because we don't want .nbframe names = self.tk.split(self.tk.call(self._w, 'pages')) ret = [] for x in names: ret.append(self.subwidget(x)) return ret def raise_page(self, name): # raise is a python keyword self.tk.call(self._w, 'raise', name) def raised(self): return self.tk.call(self._w, 'raised') class NoteBookFrame(TixWidget): # FIXME: This is dangerous to expose to be called on its own. pass class OptionMenu(TixWidget): """OptionMenu - creates a menu button of options. 
Subwidget Class --------- ----- menubutton Menubutton menu Menu""" def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixOptionMenu', ['options'], cnf, kw) self.subwidget_list['menubutton'] = _dummyMenubutton(self, 'menubutton') self.subwidget_list['menu'] = _dummyMenu(self, 'menu') def add_command(self, name, cnf={}, **kw): self.tk.call(self._w, 'add', 'command', name, *self._options(cnf, kw)) def add_separator(self, name, cnf={}, **kw): self.tk.call(self._w, 'add', 'separator', name, *self._options(cnf, kw)) def delete(self, name): self.tk.call(self._w, 'delete', name) def disable(self, name): self.tk.call(self._w, 'disable', name) def enable(self, name): self.tk.call(self._w, 'enable', name) class PanedWindow(TixWidget): """PanedWindow - Multi-pane container widget allows the user to interactively manipulate the sizes of several panes. The panes can be arranged either vertically or horizontally.The user changes the sizes of the panes by dragging the resize handle between two panes. 
Subwidgets Class ---------- ----- <panes> g/p widgets added dynamically with the add method.""" def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixPanedWindow', ['orientation', 'options'], cnf, kw) # add delete forget panecget paneconfigure panes setsize def add(self, name, cnf={}, **kw): self.tk.call(self._w, 'add', name, *self._options(cnf, kw)) self.subwidget_list[name] = TixSubWidget(self, name, check_intermediate=0) return self.subwidget_list[name] def delete(self, name): self.tk.call(self._w, 'delete', name) self.subwidget_list[name].destroy() del self.subwidget_list[name] def forget(self, name): self.tk.call(self._w, 'forget', name) def panecget(self, entry, opt): return self.tk.call(self._w, 'panecget', entry, opt) def paneconfigure(self, entry, cnf={}, **kw): if cnf is None: return _lst2dict( self.tk.split( self.tk.call(self._w, 'paneconfigure', entry))) self.tk.call(self._w, 'paneconfigure', entry, *self._options(cnf, kw)) def panes(self): names = self.tk.call(self._w, 'panes') ret = [] for x in names: ret.append(self.subwidget(x)) return ret class PopupMenu(TixWidget): """PopupMenu widget can be used as a replacement of the tk_popup command. The advantage of the Tix PopupMenu widget is it requires less application code to manipulate. 
class ResizeHandle(TixWidget):
    """Internal widget to draw resize handles on Scrolled widgets."""

    def __init__(self, master, cnf={}, **kw):
        # There seems to be a Tix bug rejecting the configure method
        # Let's try making the flags -static
        flags = ['options', 'command', 'cursorfg', 'cursorbg',
                 'handlesize', 'hintcolor', 'hintwidth',
                 'x', 'y']
        # In fact, x y height width are configurable
        TixWidget.__init__(self, master, 'tixResizeHandle',
                           flags, cnf, kw)

    def attach_widget(self, widget):
        # Draw resize handles around the given widget.
        self.tk.call(self._w, 'attachwidget', widget._w)

    def detach_widget(self, widget):
        self.tk.call(self._w, 'detachwidget', widget._w)

    def hide(self, widget):
        self.tk.call(self._w, 'hide', widget._w)

    def show(self, widget):
        self.tk.call(self._w, 'show', widget._w)

class ScrolledHList(TixWidget):
    """ScrolledHList - HList with automatic scrollbars."""

    # FIXME: It should inherit -superclass tixScrolledWidget
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixScrolledHList', ['options'],
                           cnf, kw)
        # Mirror the subwidgets Tix creates automatically.
        self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
        self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
        self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
'tixScrolledListBox', ['options'], cnf, kw) self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') class ScrolledText(TixWidget): """ScrolledText - Text with automatic scrollbars.""" # FIXME: It should inherit -superclass tixScrolledWidget def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixScrolledText', ['options'], cnf, kw) self.subwidget_list['text'] = _dummyText(self, 'text') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') class ScrolledTList(TixWidget): """ScrolledTList - TList with automatic scrollbars.""" # FIXME: It should inherit -superclass tixScrolledWidget def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixScrolledTList', ['options'], cnf, kw) self.subwidget_list['tlist'] = _dummyTList(self, 'tlist') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') class ScrolledWindow(TixWidget): """ScrolledWindow - Window with automatic scrollbars.""" # FIXME: It should inherit -superclass tixScrolledWidget def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixScrolledWindow', ['options'], cnf, kw) self.subwidget_list['window'] = _dummyFrame(self, 'window') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') class Select(TixWidget): """Select - Container of button subwidgets. It can be used to provide radio-box or check-box style of selection options for the user. 
Subwidgets are buttons added dynamically using the add method.""" # FIXME: It should inherit -superclass tixLabelWidget def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixSelect', ['allowzero', 'radio', 'orientation', 'labelside', 'options'], cnf, kw) self.subwidget_list['label'] = _dummyLabel(self, 'label') def add(self, name, cnf={}, **kw): self.tk.call(self._w, 'add', name, *self._options(cnf, kw)) self.subwidget_list[name] = _dummyButton(self, name) return self.subwidget_list[name] def invoke(self, name): self.tk.call(self._w, 'invoke', name) class Shell(TixWidget): """Toplevel window. Subwidgets - None""" def __init__ (self,master=None,cnf={}, **kw): TixWidget.__init__(self, master, 'tixShell', ['options', 'title'], cnf, kw) class DialogShell(TixWidget): """Toplevel window, with popup popdown and center methods. It tells the window manager that it is a dialog window and should be treated specially. The exact treatment depends on the treatment of the window manager. 
Subwidgets - None""" # FIXME: It should inherit from Shell def __init__ (self,master=None,cnf={}, **kw): TixWidget.__init__(self, master, 'tixDialogShell', ['options', 'title', 'mapped', 'minheight', 'minwidth', 'parent', 'transient'], cnf, kw) def popdown(self): self.tk.call(self._w, 'popdown') def popup(self): self.tk.call(self._w, 'popup') def center(self): self.tk.call(self._w, 'center') class StdButtonBox(TixWidget): """StdButtonBox - Standard Button Box (OK, Apply, Cancel and Help) """ def __init__(self, master=None, cnf={}, **kw): TixWidget.__init__(self, master, 'tixStdButtonBox', ['orientation', 'options'], cnf, kw) self.subwidget_list['ok'] = _dummyButton(self, 'ok') self.subwidget_list['apply'] = _dummyButton(self, 'apply') self.subwidget_list['cancel'] = _dummyButton(self, 'cancel') self.subwidget_list['help'] = _dummyButton(self, 'help') def invoke(self, name): if self.subwidget_list.has_key(name): self.tk.call(self._w, 'invoke', name) class TList(TixWidget): """TList - Hierarchy display widget which can be used to display data in a tabular format. The list entries of a TList widget are similar to the entries in the Tk listbox widget. The main differences are (1) the TList widget can display the list entries in a two dimensional format and (2) you can use graphical images as well as multiple colors and fonts for the list entries. 
Subwidgets - None""" def __init__ (self,master=None,cnf={}, **kw): TixWidget.__init__(self, master, 'tixTList', ['options'], cnf, kw) def active_set(self, index): self.tk.call(self._w, 'active', 'set', index) def active_clear(self): self.tk.call(self._w, 'active', 'clear') def anchor_set(self, index): self.tk.call(self._w, 'anchor', 'set', index) def anchor_clear(self): self.tk.call(self._w, 'anchor', 'clear') def delete(self, from_, to=None): self.tk.call(self._w, 'delete', from_, to) def dragsite_set(self, index): self.tk.call(self._w, 'dragsite', 'set', index) def dragsite_clear(self): self.tk.call(self._w, 'dragsite', 'clear') def dropsite_set(self, index): self.tk.call(self._w, 'dropsite', 'set', index) def dropsite_clear(self): self.tk.call(self._w, 'dropsite', 'clear') def insert(self, index, cnf={}, **kw): self.tk.call(self._w, 'insert', index, *self._options(cnf, kw)) def info_active(self): return self.tk.call(self._w, 'info', 'active') def info_anchor(self): return self.tk.call(self._w, 'info', 'anchor') def info_down(self, index): return self.tk.call(self._w, 'info', 'down', index) def info_left(self, index): return self.tk.call(self._w, 'info', 'left', index) def info_right(self, index): return self.tk.call(self._w, 'info', 'right', index) def info_selection(self): c = self.tk.call(self._w, 'info', 'selection') return self.tk.splitlist(c) def info_size(self): return self.tk.call(self._w, 'info', 'size') def info_up(self, index): return self.tk.call(self._w, 'info', 'up', index) def nearest(self, x, y): return self.tk.call(self._w, 'nearest', x, y) def see(self, index): self.tk.call(self._w, 'see', index) def selection_clear(self, cnf={}, **kw): self.tk.call(self._w, 'selection', 'clear', *self._options(cnf, kw)) def selection_includes(self, index): return self.tk.call(self._w, 'selection', 'includes', index) def selection_set(self, first, last=None): self.tk.call(self._w, 'selection', 'set', first, last) def xview(self, *args): self.tk.call(self._w, 
'xview', *args) def yview(self, *args): self.tk.call(self._w, 'yview', *args) class Tree(TixWidget): """Tree - The tixTree widget can be used to display hierachical data in a tree form. The user can adjust the view of the tree by opening or closing parts of the tree.""" # FIXME: It should inherit -superclass tixScrolledWidget def __init__(self, master=None, cnf={}, **kw): TixWidget.__init__(self, master, 'tixTree', ['options'], cnf, kw) self.subwidget_list['hlist'] = _dummyHList(self, 'hlist') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') def autosetmode(self): '''This command calls the setmode method for all the entries in this Tree widget: if an entry has no child entries, its mode is set to none. Otherwise, if the entry has any hidden child entries, its mode is set to open; otherwise its mode is set to close.''' self.tk.call(self._w, 'autosetmode') def close(self, entrypath): '''Close the entry given by entryPath if its mode is close.''' self.tk.call(self._w, 'close', entrypath) def getmode(self, entrypath): '''Returns the current mode of the entry given by entryPath.''' return self.tk.call(self._w, 'getmode', entrypath) def open(self, entrypath): '''Open the entry given by entryPath if its mode is open.''' self.tk.call(self._w, 'open', entrypath) def setmode(self, entrypath, mode='none'): '''This command is used to indicate whether the entry given by entryPath has children entries and whether the children are visible. mode must be one of open, close or none. If mode is set to open, a (+) indicator is drawn next the the entry. If mode is set to close, a (-) indicator is drawn next the the entry. If mode is set to none, no indicators will be drawn for this entry. The default mode is none. The open mode indicates the entry has hidden children and this entry can be opened by the user. 
The close mode indicates that all the children of the entry are now visible and the entry can be closed by the user.''' self.tk.call(self._w, 'setmode', entrypath, mode) # Could try subclassing Tree for CheckList - would need another arg to init class CheckList(TixWidget): """The CheckList widget displays a list of items to be selected by the user. CheckList acts similarly to the Tk checkbutton or radiobutton widgets, except it is capable of handling many more items than checkbuttons or radiobuttons. """ # FIXME: It should inherit -superclass tixTree def __init__(self, master=None, cnf={}, **kw): TixWidget.__init__(self, master, 'tixCheckList', ['options'], cnf, kw) self.subwidget_list['hlist'] = _dummyHList(self, 'hlist') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') def autosetmode(self): '''This command calls the setmode method for all the entries in this Tree widget: if an entry has no child entries, its mode is set to none. Otherwise, if the entry has any hidden child entries, its mode is set to open; otherwise its mode is set to close.''' self.tk.call(self._w, 'autosetmode') def close(self, entrypath): '''Close the entry given by entryPath if its mode is close.''' self.tk.call(self._w, 'close', entrypath) def getmode(self, entrypath): '''Returns the current mode of the entry given by entryPath.''' return self.tk.call(self._w, 'getmode', entrypath) def open(self, entrypath): '''Open the entry given by entryPath if its mode is open.''' self.tk.call(self._w, 'open', entrypath) def getselection(self, mode='on'): '''Returns a list of items whose status matches status. If status is not specified, the list of items in the "on" status will be returned. 
Mode can be on, off, default''' c = self.tk.split(self.tk.call(self._w, 'getselection', mode)) return self.tk.splitlist(c) def getstatus(self, entrypath): '''Returns the current status of entryPath.''' return self.tk.call(self._w, 'getstatus', entrypath) def setstatus(self, entrypath, mode='on'): '''Sets the status of entryPath to be status. A bitmap will be displayed next to the entry its status is on, off or default.''' self.tk.call(self._w, 'setstatus', entrypath, mode) ########################################################################### ### The subclassing below is used to instantiate the subwidgets in each ### ### mega widget. This allows us to access their methods directly. ### ########################################################################### class _dummyButton(Button, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyCheckbutton(Checkbutton, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyEntry(Entry, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyFrame(Frame, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyLabel(Label, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyListbox(Listbox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyMenu(Menu, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyMenubutton(Menubutton, TixSubWidget): def __init__(self, master, name, 
destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyScrollbar(Scrollbar, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyText(Text, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyScrolledListBox(ScrolledListBox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') class _dummyHList(HList, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyScrolledHList(ScrolledHList, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) self.subwidget_list['hlist'] = _dummyHList(self, 'hlist') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') class _dummyTList(TList, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyComboBox(ComboBox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, ['fancy',destroy_physically]) self.subwidget_list['label'] = _dummyLabel(self, 'label') self.subwidget_list['entry'] = _dummyEntry(self, 'entry') self.subwidget_list['arrow'] = _dummyButton(self, 'arrow') self.subwidget_list['slistbox'] = _dummyScrolledListBox(self, 'slistbox') try: self.subwidget_list['tick'] = _dummyButton(self, 'tick') #cross Button : present if created with the fancy option self.subwidget_list['cross'] 
= _dummyButton(self, 'cross') except TypeError: # unavailable when -fancy not specified pass class _dummyDirList(DirList, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) self.subwidget_list['hlist'] = _dummyHList(self, 'hlist') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') class _dummyDirSelectBox(DirSelectBox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist') self.subwidget_list['dircbx'] = _dummyFileComboBox(self, 'dircbx') class _dummyExFileSelectBox(ExFileSelectBox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) self.subwidget_list['cancel'] = _dummyButton(self, 'cancel') self.subwidget_list['ok'] = _dummyButton(self, 'ok') self.subwidget_list['hidden'] = _dummyCheckbutton(self, 'hidden') self.subwidget_list['types'] = _dummyComboBox(self, 'types') self.subwidget_list['dir'] = _dummyComboBox(self, 'dir') self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist') self.subwidget_list['file'] = _dummyComboBox(self, 'file') self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist') class _dummyFileSelectBox(FileSelectBox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist') self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist') self.subwidget_list['filter'] = _dummyComboBox(self, 'filter') self.subwidget_list['selection'] = _dummyComboBox(self, 'selection') class _dummyFileComboBox(ComboBox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, 
master, name, destroy_physically) self.subwidget_list['dircbx'] = _dummyComboBox(self, 'dircbx') class _dummyStdButtonBox(StdButtonBox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) self.subwidget_list['ok'] = _dummyButton(self, 'ok') self.subwidget_list['apply'] = _dummyButton(self, 'apply') self.subwidget_list['cancel'] = _dummyButton(self, 'cancel') self.subwidget_list['help'] = _dummyButton(self, 'help') class _dummyNoteBookFrame(NoteBookFrame, TixSubWidget): def __init__(self, master, name, destroy_physically=0): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyPanedWindow(PanedWindow, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) ######################## ### Utility Routines ### ######################## #mike Should tixDestroy be exposed as a wrapper? - but not for widgets. def OptionName(widget): '''Returns the qualified path name for the widget. Normally used to set default options for subwidgets. See tixwidgets.py''' return widget.tk.call('tixOptionName', widget._w) # Called with a dictionary argument of the form # {'*.c':'C source files', '*.txt':'Text Files', '*':'All files'} # returns a string which can be used to configure the fsbox file types # in an ExFileSelectBox. i.e., # '{{*} {* - All files}} {{*.c} {*.c - C source files}} {{*.txt} {*.txt - Text Files}}' def FileTypeList(dict): s = '' for type in dict.keys(): s = s + '{{' + type + '} {' + type + ' - ' + dict[type] + '}} ' return s # Still to be done: # tixIconView class CObjView(TixWidget): """This file implements the Canvas Object View widget. This is a base class of IconView. It implements automatic placement/adjustment of the scrollbars according to the canvas objects inside the canvas subwidget. The scrollbars are adjusted so that the canvas is just large enough to see all the objects. 
""" # FIXME: It should inherit -superclass tixScrolledWidget pass class Grid(TixWidget): '''The Tix Grid command creates a new window and makes it into a tixGrid widget. Additional options, may be specified on the command line or in the option database to configure aspects such as its cursor and relief. A Grid widget displays its contents in a two dimensional grid of cells. Each cell may contain one Tix display item, which may be in text, graphics or other formats. See the DisplayStyle class for more information about Tix display items. Individual cells, or groups of cells, can be formatted with a wide range of attributes, such as its color, relief and border. Subwidgets - None''' # valid specific resources as of Tk 8.4 # editdonecmd, editnotifycmd, floatingcols, floatingrows, formatcmd, # highlightbackground, highlightcolor, leftmargin, itemtype, selectmode, # selectunit, topmargin, def __init__(self, master=None, cnf={}, **kw): static= [] self.cnf= cnf TixWidget.__init__(self, master, 'tixGrid', static, cnf, kw) # valid options as of Tk 8.4 # anchor, bdtype, cget, configure, delete, dragsite, dropsite, entrycget, edit # entryconfigure, format, geometryinfo, info, index, move, nearest, selection # set, size, unset, xview, yview # def anchor option ?args ...? def anchor_get(self): "Get the (x,y) coordinate of the current anchor cell" return self._getints(self.tk.call(self, 'anchor', 'get')) # def bdtype # def delete dim from ?to? def delete_row(self, from_, to=None): """Delete rows between from_ and to inclusive. If to is not provided, delete only row at from_""" if to is None: self.tk.call(self, 'delete', 'row', from_) else: self.tk.call(self, 'delete', 'row', from_, to) def delete_column(self, from_, to=None): """Delete columns between from_ and to inclusive. 
If to is not provided, delete only column at from_""" if to is None: self.tk.call(self, 'delete', 'column', from_) else: self.tk.call(self, 'delete', 'column', from_, to) # def edit apply # def edit set x y def entrycget(self, x, y, option): "Get the option value for cell at (x,y)" return self.tk.call(self, 'entrycget', x, y, option) def entryconfigure(self, x, y, **kw): return self.tk.call(self, 'entryconfigure', x, y, *self._options(None, kw)) # def format # def index def info_exists(self, x, y): "Return True if display item exists at (x,y)" return bool(int(self.tk.call(self, 'info', 'exists', x, y))) def info_bbox(self, x, y): # This seems to always return '', at least for 'text' displayitems return self.tk.call(self, 'info', 'bbox', x, y) def nearest(self, x, y): "Return coordinate of cell nearest pixel coordinate (x,y)" return self._getints(self.tk.call(self, 'nearest', x, y)) # def selection adjust # def selection clear # def selection includes # def selection set # def selection toggle # def move dim from to offset def set(self, x, y, itemtype=None, **kw): args= self._options(self.cnf, kw) if itemtype is not None: args= ('-itemtype', itemtype) + args self.tk.call(self, 'set', x, y, *args) # def size dim index ?option value ...? 
# def unset x y def xview(self): return self._getdoubles(self.tk.call(self, 'xview')) def xview_moveto(self, fraction): self.tk.call(self,'xview', 'moveto', fraction) def xview_scroll(self, count, what="units"): "Scroll right (count>0) or left <count> of units|pages" self.tk.call(self, 'xview', 'scroll', count, what) def yview(self): return self._getdoubles(self.tk.call(self, 'yview')) def yview_moveto(self, fraction): self.tk.call(self,'ysview', 'moveto', fraction) def yview_scroll(self, count, what="units"): "Scroll down (count>0) or up <count> of units|pages" self.tk.call(self, 'yview', 'scroll', count, what) class ScrolledGrid(Grid): '''Scrolled Grid widgets''' # FIXME: It should inherit -superclass tixScrolledWidget def __init__(self, master=None, cnf={}, **kw): static= [] self.cnf= cnf TixWidget.__init__(self, master, 'tixScrolledGrid', static, cnf, kw)
apache-2.0
lxsmnv/spark
examples/src/main/python/sql/arrow.py
13
3997
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A simple example demonstrating Arrow in Spark. Run with: ./bin/spark-submit examples/src/main/python/sql/arrow.py """ from __future__ import print_function from pyspark.sql import SparkSession from pyspark.sql.utils import require_minimum_pandas_version, require_minimum_pyarrow_version require_minimum_pandas_version() require_minimum_pyarrow_version() def dataframe_with_arrow_example(spark): # $example on:dataframe_with_arrow$ import numpy as np import pandas as pd # Enable Arrow-based columnar data transfers spark.conf.set("spark.sql.execution.arrow.enabled", "true") # Generate a Pandas DataFrame pdf = pd.DataFrame(np.random.rand(100, 3)) # Create a Spark DataFrame from a Pandas DataFrame using Arrow df = spark.createDataFrame(pdf) # Convert the Spark DataFrame back to a Pandas DataFrame using Arrow result_pdf = df.select("*").toPandas() # $example off:dataframe_with_arrow$ print("Pandas DataFrame result statistics:\n%s\n" % str(result_pdf.describe())) def scalar_pandas_udf_example(spark): # $example on:scalar_pandas_udf$ import pandas as pd from pyspark.sql.functions import col, pandas_udf from pyspark.sql.types import LongType # Declare the function and create the UDF def multiply_func(a, b): 
return a * b multiply = pandas_udf(multiply_func, returnType=LongType()) # The function for a pandas_udf should be able to execute with local Pandas data x = pd.Series([1, 2, 3]) print(multiply_func(x, x)) # 0 1 # 1 4 # 2 9 # dtype: int64 # Create a Spark DataFrame, 'spark' is an existing SparkSession df = spark.createDataFrame(pd.DataFrame(x, columns=["x"])) # Execute function as a Spark vectorized UDF df.select(multiply(col("x"), col("x"))).show() # +-------------------+ # |multiply_func(x, x)| # +-------------------+ # | 1| # | 4| # | 9| # +-------------------+ # $example off:scalar_pandas_udf$ def grouped_map_pandas_udf_example(spark): # $example on:grouped_map_pandas_udf$ from pyspark.sql.functions import pandas_udf, PandasUDFType df = spark.createDataFrame( [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ("id", "v")) @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) def substract_mean(pdf): # pdf is a pandas.DataFrame v = pdf.v return pdf.assign(v=v - v.mean()) df.groupby("id").apply(substract_mean).show() # +---+----+ # | id| v| # +---+----+ # | 1|-0.5| # | 1| 0.5| # | 2|-3.0| # | 2|-1.0| # | 2| 4.0| # +---+----+ # $example off:grouped_map_pandas_udf$ if __name__ == "__main__": spark = SparkSession \ .builder \ .appName("Python Arrow-in-Spark example") \ .getOrCreate() print("Running Pandas to/from conversion example") dataframe_with_arrow_example(spark) print("Running pandas_udf scalar example") scalar_pandas_udf_example(spark) print("Running pandas_udf grouped map example") grouped_map_pandas_udf_example(spark) spark.stop()
apache-2.0
mitocw/edx-platform
lms/djangoapps/course_api/tests/test_permissions.py
4
1738
""" Test authorization functions """ from django.contrib.auth.models import AnonymousUser from django.test import TestCase from ..permissions import can_view_courses_for_username from .mixins import CourseApiFactoryMixin class ViewCoursesForUsernameTestCase(CourseApiFactoryMixin, TestCase): """ Verify functionality of view_courses_for_username. Any user should be able to view their own courses, and staff users should be able to view anyone's courses. """ @classmethod def setUpClass(cls): super(ViewCoursesForUsernameTestCase, cls).setUpClass() cls.staff_user = cls.create_user('staff', is_staff=True) cls.honor_user = cls.create_user('honor', is_staff=False) cls.anonymous_user = AnonymousUser() def test_for_staff(self): self.assertTrue(can_view_courses_for_username(self.staff_user, self.staff_user.username)) def test_for_honor(self): self.assertTrue(can_view_courses_for_username(self.honor_user, self.honor_user.username)) def test_for_staff_as_honor(self): self.assertTrue(can_view_courses_for_username(self.staff_user, self.honor_user.username)) def test_for_honor_as_staff(self): self.assertFalse(can_view_courses_for_username(self.honor_user, self.staff_user.username)) def test_for_none_as_staff(self): with self.assertRaises(TypeError): can_view_courses_for_username(self.staff_user, None) def test_for_anonymous(self): self.assertTrue(can_view_courses_for_username(self.anonymous_user, self.anonymous_user.username)) def test_for_anonymous_as_honor(self): self.assertFalse(can_view_courses_for_username(self.anonymous_user, self.honor_user.username))
agpl-3.0
agconti/Shopify-Django
venv/lib/python2.7/site-packages/django/contrib/formtools/tests/wizard/namedwizardtests/forms.py
318
1705
# Test fixtures for the formtools NamedUrlWizardView tests: three single-page
# forms, a formset page, and wizard subclasses using session/cookie storage.
import os
import tempfile

from django import forms
from django.core.files.storage import FileSystemStorage
from django.forms.formsets import formset_factory
from django.http import HttpResponse
from django.template import Template, Context
from django.contrib.auth.models import User

from django.contrib.formtools.wizard.views import NamedUrlWizardView

# File uploads made during wizard tests land in a throwaway directory
# (rooted at DJANGO_TEST_TEMP_DIR when that env var is set).
temp_storage_location = tempfile.mkdtemp(dir=os.environ.get('DJANGO_TEST_TEMP_DIR'))
temp_storage = FileSystemStorage(location=temp_storage_location)


class Page1(forms.Form):
    # Step 1: basic user info.
    name = forms.CharField(max_length=100)
    user = forms.ModelChoiceField(queryset=User.objects.all())
    thirsty = forms.NullBooleanField()


class Page2(forms.Form):
    # Step 2: address fields plus a file upload (exercises file_storage).
    address1 = forms.CharField(max_length=100)
    address2 = forms.CharField(max_length=100)
    file1 = forms.FileField()


class Page3(forms.Form):
    random_crap = forms.CharField(max_length=100)


# Step 4 is a formset of Page3 with two extra blank forms.
Page4 = formset_factory(Page3, extra=2)


class ContactWizard(NamedUrlWizardView):
    file_storage = temp_storage

    def done(self, form_list, **kwargs):
        # Render an empty template against a context holding every step's
        # cleaned data, so tests can inspect the wizard's final state.
        c = Context({
            'form_list': [x.cleaned_data for x in form_list],
            'all_cleaned_data': self.get_all_cleaned_data()
        })

        for form in self.form_list.keys():
            c[form] = self.get_cleaned_data_for_step(form)

        # NOTE(review): 'this_will_fail' is presumably a deliberately unknown
        # step name exercising the lookup's failure path — confirm in the
        # wizard tests that consume this context.
        c['this_will_fail'] = self.get_cleaned_data_for_step('this_will_fail')
        return HttpResponse(Template('').render(c))


class SessionContactWizard(ContactWizard):
    # Same wizard, persisted in the Django session.
    storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'


class CookieContactWizard(ContactWizard):
    # Same wizard, persisted in a signed cookie.
    storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
mit
Scemoon/lpts
site-packages/pychart/basecanvas.py
11
18866
# # Copyright (C) 2000-2005 by Yasushi Saito (yasushi.saito@gmail.com) # # Jockey is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 2, or (at your option) any # later version. # # Jockey is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. # import math import sys import time import re import font import pychart_util import theme import version from scaling import * def _compute_bounding_box(points): """Given the list of coordinates (x,y), this procedure computes the smallest rectangle that covers all the points.""" (xmin, ymin, xmax, ymax) = (999999, 999999, -999999, -999999) for p in points: xmin = min(xmin, p[0]) xmax = max(xmax, p[0]) ymin = min(ymin, p[1]) ymax = max(ymax, p[1]) return (xmin, ymin, xmax, ymax) def _intersect_box(b1, b2): xmin = max(b1[0], b2[0]) ymin = max(b1[1], b2[1]) xmax = min(b1[2], b2[2]) ymax = min(b1[3], b2[3]) return (xmin, ymin, xmax, ymax) def invisible_p(x, y): """Return true if the point (X, Y) is visible in the canvas.""" if x < -499999 or y < -499999: return 1 return 0 def to_radian(deg): return deg*2*math.pi / 360.0 def midpoint(p1, p2): return ( (p1[0]+p2[0])/2.0, (p1[1]+p2[1])/2.0 ) active_canvases = [] InvalidCoord = 999999 class T(object): def __init__(self): global active_canvases self.__xmax = -InvalidCoord self.__xmin = InvalidCoord self.__ymax = -InvalidCoord self.__ymin = InvalidCoord self.__clip_box = (-InvalidCoord, -InvalidCoord, InvalidCoord, InvalidCoord) self.__clip_stack = [] self.__nr_gsave = 0 self.title = theme.title or re.sub("(.*)\\.py$", "\\1", sys.argv[0]) self.creator = theme.creator or "pychart %s" % (version.version,) self.creation_date = theme.creation_date or \ time.strftime("(%m/%d/%y) (%I:%M %p)") 
self.author = theme.author self.aux_comments = theme.aux_comments or "" active_canvases.append(self) def set_title(self, s): """Define the string to be shown in EPS/PDF "Title" field. The default value is the name of the script that creates the EPS/PDF file.""" self.title = s def set_creator(self, tag): """Define the string to be shown in EPS %%Creator or PDF Producer field. The default value is "pychart".""" self.creator = tag def set_creation_date(self, s): """Define the string to be shown in EPS/PDF "CreationDate" field. Defalt value of this field is the current time.""" self.creation_date = s def set_author(self, s): """Set the author string. Unless this method is called, the Author field is not output in EPS or PDF.""" self.author = s def add_aux_comments(self, s): """Define an auxiliary comments to be output to the file, just after the required headers""" self.aux_comments += s def close(self): """This method closes the canvas and writes contents to the associated file. Calling this procedure is optional, because Pychart calls this procedure for every open canvas on normal exit.""" for i in range(0, len(active_canvases)): if active_canvases[i] == self: del active_canvases[i] return def open_output(self, fname): """Open the output file FNAME. Returns tuple (FD, NEED_CLOSE), where FD is a file (or file-like) object, and NEED_CLOSE is a boolean flag that tells whether FD.close() should be called after finishing writing to the file. FNAME can be one of the three things: (1) None, in which case (sys.stdout, False) is returned. (2) A file-like object, in which case (fname, False) is returned. 
(3) A string, in which case this procedure opens the file and returns (fd, True).""" if not fname: return (sys.stdout, False) elif isinstance(fname, str): return (file(fname, "wb"), True) else: if not hasattr(fname, "write"): raise Exception, "Expecting either a filename or a file-like object, but got %s" % fname return (fname, False) def setbb(self, x, y): """Call this method when point (X,Y) is to be drawn in the canvas. This methods expands the bounding box to include this point.""" self.__xmin = min(self.__xmin, max(x, self.__clip_box[0])) self.__xmax = max(self.__xmax, min(x, self.__clip_box[2])) self.__ymin = min(self.__ymin, max(y, self.__clip_box[1])) self.__ymax = max(self.__ymax, min(y, self.__clip_box[3])) def fill_with_pattern(self, pat, x1, y1, x2, y2): if invisible_p(x2, y2): return self.comment("FILL pat=%s (%d %d)-(%d %d)\n" % (pat, x1, y1, x2, y2)) self.set_fill_color(pat.bgcolor) self._path_polygon([(x1, y1), (x1, y2), (x2, y2), (x2, y1)]) self.fill() pat.draw(self, x1, y1, x2, y2) self.comment("end FILL.\n") def _path_polygon(self, points): "Low-level polygon-drawing routine." (xmin, ymin, xmax, ymax) = _compute_bounding_box(points) if invisible_p(xmax, ymax): return self.setbb(xmin, ymin) self.setbb(xmax, ymax) self.newpath() self.moveto(xscale(points[0][0]), yscale(points[0][1])) for point in points[1:]: self.lineto(xscale(point[0]), yscale(point[1])) self.closepath() def polygon(self, edge_style, pat, points, shadow = None): """Draw a polygon with EDGE_STYLE, fill with PAT, and the edges POINTS. POINTS is a sequence of coordinates, e.g., ((10,10), (15,5), (20,8)). SHADOW is either None or a tuple (XDELTA, YDELTA, fillstyle). 
If non-null, a shadow of FILLSTYLE is drawn beneath the polygon at the offset of (XDELTA, YDELTA).""" if pat: self.comment("POLYGON points=[%s] pat=[%s]" % (str(points), str(pat))) (xmin, ymin, xmax, ymax) = _compute_bounding_box(points) if shadow: xoff, yoff, shadow_pat = shadow self.gsave() self._path_polygon(map(lambda p, xoff=xoff, yoff=yoff: (p[0]+xoff, p[1]+yoff), points)) self.clip_sub() self.fill_with_pattern(shadow_pat, xmin+xoff, ymin+yoff, xmax+xoff, ymax+yoff) self.grestore() self.gsave() self._path_polygon(points) self.clip_sub() self.fill_with_pattern(pat, xmin, ymin, xmax, ymax) self.grestore() if edge_style: self.comment("POLYGON points=[%s] edge=[%s]" % (str(points), str(edge_style))) self.set_line_style(edge_style) self._path_polygon(points) self.stroke() def set_background(self, pat, x1, y1, x2, y2): xmax, xmin, ymax, ymin = self.__xmax, self.__xmin, self.__ymax, self.__ymin self.rectangle(None, pat, x1, y1, x2, y2) self.__xmax, self.__xmin, self.__ymax, self.__ymin = xmax, xmin, ymax, ymin def rectangle(self, edge_style, pat, x1, y1, x2, y2, shadow = None): """Draw a rectangle with EDGE_STYLE, fill with PAT, and the bounding box (X1, Y1, X2, Y2). SHADOW is either None or a tuple (XDELTA, YDELTA, fillstyle). 
If non-null, a shadow of FILLSTYLE is drawn beneath the polygon at the offset of (XDELTA, YDELTA).""" self.polygon(edge_style, pat, [(x1,y1), (x1,y2), (x2,y2), (x2, y1)], shadow) def _path_ellipsis(self, x, y, radius, ratio, start_angle, end_angle): self.setbb(x - radius, y - radius*ratio) self.setbb(x + radius, y + radius*ratio) oradius = nscale(radius) centerx, centery = xscale(x), yscale(y) startx, starty = centerx+oradius * math.cos(to_radian(start_angle)), \ centery+oradius * math.sin(to_radian(start_angle)) self.moveto(centerx, centery) if start_angle % 360 != end_angle % 360: self.moveto(centerx, centery) self.lineto(startx, starty) else: self.moveto(startx, starty) self.path_arc(xscale(x), yscale(y), nscale(radius), ratio, start_angle, end_angle) self.closepath() def ellipsis(self, line_style, pattern, x, y, radius, ratio = 1.0, start_angle=0, end_angle=360, shadow=None): """Draw an ellipsis with line_style and fill PATTERN. The center is \ (X, Y), X radius is RADIUS, and Y radius is RADIUS*RATIO, whose \ default value is 1.0. SHADOW is either None or a tuple (XDELTA, YDELTA, fillstyle). 
If non-null, a shadow of FILLSTYLE is drawn beneath the polygon at the offset of (XDELTA, YDELTA).""" if invisible_p(x + radius, y + radius*ratio): return if pattern: if shadow: x_off, y_off, shadow_pat = shadow self.gsave() self.newpath() self._path_ellipsis(x+x_off, y+y_off, radius, ratio, start_angle, end_angle) self.clip_sub() self.fill_with_pattern(shadow_pat, x-radius*2+x_off, y-radius*ratio*2+y_off, x+radius*2+x_off, y+radius*ratio*2+y_off) self.grestore() self.gsave() self.newpath() self._path_ellipsis(x, y, radius, ratio, start_angle, end_angle) self.clip_sub() self.fill_with_pattern(pattern, (x-radius*2), (y-radius*ratio*2), (x+radius*2), (y+radius*ratio*2)) self.grestore() if line_style: self.set_line_style(line_style) self.newpath() self._path_ellipsis(x, y, radius, ratio, start_angle, end_angle) self.stroke() def clip_ellipsis(self, x, y, radius, ratio = 1.0): """Create an elliptical clip region. You must call endclip() after you completed drawing. See also the ellipsis method.""" self.gsave() self.newpath() self.moveto(xscale(x)+nscale(radius), yscale(y)) self.path_arc(xscale(x), yscale(y), nscale(radius), ratio, 0, 360) self.closepath() self.__clip_stack.append(self.__clip_box) self.clip_sub() def clip_polygon(self, points): """Create a polygonal clip region. You must call endclip() after you completed drawing. See also the polygon method.""" self.gsave() self._path_polygon(points) self.__clip_stack.append(self.__clip_box) self.__clip_box = _intersect_box(self.__clip_box, _compute_bounding_box(points)) self.clip_sub() def clip(self, x1, y1, x2, y2): """Activate a rectangular clip region, (X1, Y1) - (X2, Y2). You must call endclip() after you completed drawing. canvas.clip(x,y,x2,y2) draw something ... 
canvas.endclip() """ self.__clip_stack.append(self.__clip_box) self.__clip_box = _intersect_box(self.__clip_box, (x1, y1, x2, y2)) self.gsave() self.newpath() self.moveto(xscale(x1), yscale(y1)) self.lineto(xscale(x1), yscale(y2)) self.lineto(xscale(x2), yscale(y2)) self.lineto(xscale(x2), yscale(y1)) self.closepath() self.clip_sub() def endclip(self): """End the current clip region. When clip calls are nested, it ends the most recently created crip region.""" self.__clip_box = self.__clip_stack[-1] del self.__clip_stack[-1] self.grestore() def curve(self, style, points): for p in points: self.setbb(p[0], p[1]) self.newpath() self.set_line_style(style) self.moveto(xscale(points[0][0]), xscale(points[0][1])) i = 1 n = 1 while i < len(points): if n == 1: x2 = points[i] n += 1 elif n == 2: x3 = points[i] n += 1 elif n == 3: x4 = midpoint(x3, points[i]) self.curveto(xscale(x2[0]), xscale(x2[1]), xscale(x3[0]), xscale(x3[1]), xscale(x4[0]), xscale(x4[1])) n = 1 i += 1 if n == 1: pass if n == 2: self.lineto(xscale(x2[0]), xscale(x2[1])) if n == 3: self.curveto(xscale(x2[0]), xscale(x2[1]), xscale(x2[0]), xscale(x2[1]), xscale(x3[0]), xscale(x3[1])) self.stroke() def line(self, style, x1, y1, x2, y2): if not style: return if invisible_p(x2, y2) and invisible_p(x1, y1): return self.setbb(x1, y1) self.setbb(x2, y2) self.newpath() self.set_line_style(style) self.moveto(xscale(x1), yscale(y1)) self.lineto(xscale(x2), yscale(y2)) self.stroke() def lines(self, style, segments): if not style: return (xmin, ymin, xmax, ymax) = _compute_bounding_box(segments) if invisible_p(xmax, ymax): return self.setbb(xmin, ymin) self.setbb(xmax, ymax) self.newpath() self.set_line_style(style) self.moveto(xscale(segments[0][0]), xscale(segments[0][1])) for i in range(1, len(segments)): self.lineto(xscale(segments[i][0]), yscale(segments[i][1])) self.stroke() def _path_round_rectangle(self, x1, y1, x2, y2, radius): self.moveto(xscale(x1 + radius), yscale(y1)) self.lineto(xscale(x2 - radius), 
yscale(y1)) self.path_arc(xscale(x2-radius), yscale(y1+radius), nscale(radius), 1, 270, 360) self.lineto(xscale(x2), yscale(y2-radius)) self.path_arc(xscale(x2-radius), yscale(y2-radius), nscale(radius), 1, 0, 90) self.lineto(xscale(x1+radius), yscale(y2)) self.path_arc(xscale(x1 + radius), yscale(y2 - radius), nscale(radius), 1, 90, 180) self.lineto(xscale(x1), xscale(y1+radius)) self.path_arc(xscale(x1 + radius), yscale(y1 + radius), nscale(radius), 1, 180, 270) def round_rectangle(self, style, fill, x1, y1, x2, y2, radius, shadow=None): """Draw a rectangle with rounded four corners. Parameter <radius> specifies the radius of each corner.""" if invisible_p(x2, y2): return self.setbb(x1, y1) self.setbb(x2, y2) if fill: if shadow: x_off, y_off, shadow_fill = shadow self.gsave(); self.newpath() self._path_round_rectangle(x1+x_off, y1+y_off, x2+x_off, y2+y_off, radius) self.closepath() self.clip_sub() self.fill_with_pattern(shadow_fill, x1+x_off, y1+y_off, x2+x_off, y2+y_off) self.grestore() self.gsave(); self.newpath() self._path_round_rectangle(x1, y1, x2, y2, radius) self.closepath() self.clip_sub() self.fill_with_pattern(fill, x1, y1, x2, y2) self.grestore() if style: self.set_line_style(style) self.newpath() self._path_round_rectangle(x1, y1, x2, y2, radius) self.closepath() self.stroke() def show(self, x, y, str): global out y_org = y org_str = str if invisible_p(x, y): return (xmin, xmax, ymin, ymax) = font.get_dimension(str) # rectangle(line_style.default, None, x+xmin, y+ymin, x+xmax, y+ymax) # ellipsis(line_style.default, None, x, y, 1) self.setbb(x+xmin, y+ymin) self.setbb(x+xmax, y+ymax) (halign, valign, angle) = font.get_align(str) base_x = x base_y = y # Handle vertical alignment if valign == "B": y = font.unaligned_text_height(str) elif valign == "T": y = 0 elif valign == "M": y = font.unaligned_text_height(str) / 2.0 (xmin, xmax, ymin, ymax) = font.get_dimension(org_str) self.setbb(x+xmin, y_org+y+ymin) self.setbb(x+xmax, y_org+y+ymax) itr = 
font.text_iterator(None) max_width = 0 lines = [] for line in str.split('\n'): cur_width = 0 cur_height = 0 itr.reset(line) strs = [] while 1: elem = itr.next() if not elem: break (font_name, size, line_height, color, _h, _v, _a, str) = elem cur_width += font.line_width(font_name, size, str) max_width = max(cur_width, max_width) cur_height = max(cur_height, line_height) # replace '(' -> '\(', ')' -> '\)' to make # Postscript string parser happy. str = str.replace("(", "\\(") str = str.replace(")", "\\)") strs.append((font_name, size, color, str)) lines.append((cur_width, cur_height, strs)) for line in lines: cur_width, cur_height, strs = line cur_y = y - cur_height y = y - cur_height self.comment("cury: %d hei %d str %s\n" % (cur_y, cur_height, strs)) if halign == 'C': cur_x = -cur_width/2.0 elif halign == 'R': cur_x = -cur_width else: cur_x = 0 rel_x, rel_y = pychart_util.rotate(cur_x, cur_y, angle) self.text_begin() self.text_moveto(xscale(base_x + rel_x), yscale(base_y + rel_y), angle) for segment in strs: font_name, size, color, str = segment self.text_show(font_name, nscale(size), color, str) self.text_end()
gpl-2.0
benjaoming/kolibri
kolibri/content/migrations/0009_auto_20180410_1139.py
3
1620
# -*- coding: utf-8 -*- # Generated by Django 1.11.11 on 2018-04-10 18:39 from __future__ import unicode_literals from django.db import migrations from django.db import models class Migration(migrations.Migration): dependencies = [ ('content', '0008_contentnode_coach_content'), ] operations = [ migrations.AlterField( model_name='file', name='preset', field=models.CharField(blank=True, choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('vector_video', 'Vectorized'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_thumbnail', 'HTML5 Thumbnail')], max_length=150), ), migrations.AlterField( model_name='localfile', name='extension', field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40), ), ]
mit
tuxos/tweepy
tests/test_rate_limit.py
47
5416
import unittest import os from tweepy import API, Cursor from tweepy.error import TweepError import six if six.PY3: import unittest else: import unittest2 as unittest from .config import create_auth testratelimit = 'TEST_RATE_LIMIT' in os.environ @unittest.skipIf(not testratelimit, "skipping rate limiting test since testratelimit is not specified") class TweepyRateLimitTests(unittest.TestCase): def setUp(self): self.api = API(create_auth()) self.api.retry_count = 2 self.api.retry_delay = 5 self.api.retry_errors = set([401, 404, 503]) self.api.wait_on_rate_limit = True def testratelimit(self): # should cause the api to sleep test_user_ids = [123796151, 263168076, 990027860, 901955678, 214630268, 18305040, 36126818, 312483939, 426975332, 469837158, 1104126054, 1342066705, 281632872, 608977002, 242901099, 846643308, 1166401645, 153886833, 95314037, 314458230, 149856382, 287916159, 472506496, 267180736, 251764866, 351035524, 997113991, 445915272, 57335947, 251043981, 95051918, 200761489, 48341139, 972660884, 422330517, 326429297, 864927896, 94183577, 95887514, 220807325, 194330782, 58796741, 1039212709, 1017192614, 625828008, 66539548, 320566383, 309829806, 571383983, 382694863, 439140530, 93977882, 277651636, 19984414, 502004733, 1093673143, 60014776, 469849460, 937107642, 155516395, 1272979644, 617433802, 102212981, 301228831, 805784562, 427799926, 322298054, 162197537, 554001783, 89252046, 536789199, 177807568, 805044434, 495541739, 392904916, 154656981, 291266775, 865454102, 475846642, 56910044, 55834550, 177389790, 339841061, 319614526, 954529597, 595960038, 501301480, 15679722, 938090731, 495829228, 325034224, 1041031410, 18882803, 161080540, 456245496, 636854521, 811974907, 222085372, 222306563, 422846724, 281616645, 223641862, 705786134, 1038901512, 174211339, 426795277, 370259272, 34759594, 366410456, 320577812, 757211413, 483238166, 222624369, 29425605, 456455726, 408723740, 1274608346, 295837985, 273490210, 232497444, 726843685, 465232166, 18850087, 
22503721, 259629354, 414250375, 1259941938, 777167150, 1080552157, 1271036282, 1000551816, 109443357, 345781858, 45113654, 406536508, 253801866, 98836799, 395469120, 252920129, 604660035, 69124420, 283459909, 482261729, 377767308, 565240139, 191788429, 102048080, 330054371, 527868245, 177044049, 1250978114, 424042840, 15810905, 389030234, 69324415, 15638877, 159080798, 378708319, 549183840, 1034658145, 629924195, 969130340, 1143593845, 188129639, 535863656, 552452458, 1325277547, 756236624, 48421608, 178495858, 566206836, 378519925, 22678249, 377659768, 102326650, 76783997, 440716178, 49062271, 26296705, 1328036587, 289644932, 305767830, 437305735, 124821901, 591735533, 155140501, 1099612568, 631398810, 469295515, 131350941, 325804447, 529801632, 977197808, 232613818, 614777251, 229261732, 255533478, 256942503, 169583016, 237860252, 29257799, 276668845, 871571886, 398162507, 451954078, 526016951, 285655480, 1281827257, 340042172, 146653629, 61055423, 33407417, 95582321, 237420995, 310960580, 1222064886, 16490950, 60924360, 81928649, 374424010, 45703629, 817455571, 336077264, 400268024, 1203200467, 457105876, 232309205, 45838026, 91972056, 226927065, 82125276, 760131962, 1032274398, 562552291, 155155166, 146464315, 864864355, 128655844, 589747622, 293290470, 192004584, 19100402, 133931498, 19775979, 446374381, 1175241198, 20128240, 332395944, 74575955, 247407092, 427794934, 329823657, 405742072, 497475320, 997384698, 147718652, 757768705, 96757163, 289874437, 29892071, 568541704, 297039276, 356590090, 502055438, 291826323, 238944785, 71483924, 50031538, 863355416, 120273668, 224403994, 14880858, 1241506364, 848962080, 57898416, 599695908, 1222132262, 54045447, 907207212, 851412402, 454418991, 231844616, 618447410, 602997300, 447685173, 19681556, 22233657, 509901138, 184705596, 307624714, 553017923, 1249878596, 33727045, 419873350, 789307489, 287531592, 399163977, 1069425228, 920789582, 136891149, 134857296, 358558478, 436855382, 963011161, 195764827, 548872797, 
1058980446, 442376799, 578216544, 527147110, 122077799, 1004773993, 420332138, 514994279, 61530732, 133462802, 19513966, 1286972018, 786121332, 265863798, 221258362, 42656382, 43631231, 198264256, 944382595, 37387030, 260948614, 314406408, 296512982, 92830743, 24519306, 21070476, 454107789, 331006606, 939713168, 256197265, 30065299, 74774188, 1332842606, 289424023, 526992024, 429933209, 116384410, 762143389, 308093598, 421208736, 454943394, 66026267, 158851748, 257550092, 70697073, 903627432, 290669225, 121168557, 92994330, 67642033, 635183794, 499303091, 421205146, 1252648171, 375268025, 16281866, 211960508, 267179466, 129016511, 157172416, 373370004, 167781059, 43624522] for user_id in test_user_ids: try: self.api.user_timeline(user_id=user_id, count=1, include_rts=True) except TweepError as e: # continue if we're not autherized to access the user's timeline or she doesn't exist anymore if e.response is not None and e.response.status in set([401, 404]): continue raise e if __name__ == '__main__': oauth_consumer_key = os.environ.get('CONSUMER_KEY', '') if testratelimit: unittest.TextTestRunner().run(unittest.loader.makeSuite(TweepyRateLimitTests)) else: unittest.main()
mit
ecino/compassion-modules
mobile_app_connector/models/wordpress_post.py
2
10124
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2019 Compassion CH (http://www.compassion.ch) # @author: Emanuel Cino <ecino@compassion.ch> # # The licence is in the file __manifest__.py # ############################################################################## import logging from HTMLParser import HTMLParser from ..tools import wp_requests from odoo import api, models, fields, _ _logger = logging.getLogger(__name__) class WordpressPost(models.Model): """ This serves as a local cache of all Published articles on the WP website, in order to answer fast to mobile app queries for rendering the main hub of the users. """ _name = 'wp.post' _description = 'Wordpress post' _order = 'date desc' name = fields.Char('Title', required=True) date = fields.Datetime(required=True) wp_id = fields.Integer('Wordpress Post ID', required=True, index=True) url = fields.Char('URL') image_url = fields.Char() post_type = fields.Selection([ ('posts', 'News'), ('agendas', 'Agenda'), ('download', 'Download') ]) category_ids = fields.Many2many('wp.post.category', string='Categories') lang = fields.Selection('select_lang', 'Language', required=True) display_on_hub = fields.Boolean( default=True, help='Deactivate in order to hide tiles in App.') view_order = fields.Integer('View order', required=True, default=6000) is_automatic_ordering = fields.Boolean("Automatic ordering", default=True) tile_type = fields.Selection([ ('Prayer', 'Prayer'), ('Story', 'Story') ], compute='_compute_tile_type', inverse='_inverse_tile_type', store=True) tile_subtype = fields.Selection([ ('PR2', 'PR2'), ('ST_T1', 'ST_T1'), ], compute='_compute_tile_subtype') _sql_constraints = [ ('wp_unique', 'unique(wp_id)', 'This post already exists') ] @api.multi @api.depends('category_ids', 'category_ids.default_tile_type') def _compute_tile_type(self): for post in self: default_types = post.category_ids.mapped('default_tile_type') if default_types 
and not post.tile_type: post.tile_type = default_types[0] @api.multi def _compute_tile_subtype(self): for post in self: post.tile_subtype = 'PR2' if post.tile_type == 'Prayer' \ else 'ST_T1' @api.multi def _inverse_tile_type(self): # Simply allows to write in field return True @api.model def select_lang(self): langs = self.env['res.lang'].search([]) return [(lang.code, lang.name) for lang in langs] @api.onchange('display_on_hub') def onchange_display_on_hub(self): """ If the user activate the display on hub, notify that the wordpress post should have some content. :return: warning to user """ if self.display_on_hub: return { 'warning': { 'title': _("Warning"), 'message': _( "This post was disabled probably because it doesn't " "have a content. Please make sure the post has a " "body to avoid any display issues in the mobile app.") }, } @api.model def fetch_posts(self, post_type): """ This is called by a CRON job in order to refresh the cache of published posts in the website. https://developer.wordpress.org/rest-api/reference/posts/ :param post_type: the post type to fetch :return: True """ _logger.info("Fetch Wordpress %s started!", post_type) wp_config = self.env['wordpress.configuration'].get_config() # This is standard Wordpress REST API URL wp_api_url = 'https://' + wp_config.host + '/wp-json/wp/v2/' \ + post_type # This is for avoid loading all post content params = {'context': 'embed', 'per_page': 100} found_ids = [] try: h = HTMLParser() with wp_requests.Session(wp_config) as requests: for lang in self._supported_langs(): params['lang'] = lang.code[:2] wp_posts = requests.get(wp_api_url, params=params).json() _logger.info('Processing posts in %s', lang.name) for i, post_data in enumerate(wp_posts): _logger.info("...processing post %s/%s", str(i+1), str(len(wp_posts))) post_id = post_data['id'] found_ids.append(post_id) cached_post = self.search([('wp_id', '=', post_id)]) if cached_post: cached_post.update_post_categories( post_data, requests) # Skip post 
already fetched continue content_empty = True self_url = post_data['_links']['self'][0]['href'] http_response = requests.get(self_url) if http_response.ok: content = http_response.json() if content['content']['rendered']: content_empty = False try: # Fetch image for thumbnail image_json_url = post_data['_links'][ 'wp:featuredmedia'][0]['href'] image_json = requests.get(image_json_url).json() if '.jpg' in image_json['media_details']['sizes'][ 'medium']['source_url']: image_url = \ image_json['media_details']['sizes'][ 'medium']['source_url'] else: image_url = image_json['source_url'] except KeyError: # Some post images may not be accessible image_url = False _logger.warning('WP Post ID %s has no image', str(post_id)) # Fetch post category categories_id = self._fetch_categories_ids(post_data, requests) # Cache new post in database self.create({ 'name': h.unescape(post_data['title']['rendered']), 'date': post_data['date'], 'wp_id': post_id, 'url': post_data['link'], 'image_url': image_url, 'post_type': post_type, 'category_ids': [(6, 0, categories_id)], 'lang': lang.code, 'display_on_hub': not content_empty }) # Delete unpublished posts self.search([('wp_id', 'not in', found_ids), ('post_type', '=', post_type)]).unlink() _logger.info("Fetch Wordpress Posts finished!") except ValueError: _logger.warning("Error fetching wordpress posts", exc_info=True) return True def update_post_categories(self, post_data, requests): """ Update the categories from a post, given the JSON data received. 
:param post_data: JSON data from the Wordpress API :param requests: The Wordpress API session :return: None """ self.ensure_one() categories_id = self._fetch_categories_ids(post_data, requests) # If there is a difference between categories if sorted(self.category_ids.ids) != sorted(categories_id): self.write({ 'category_ids': [(6, 0, categories_id)], }) self.update_display_on_hub() def _fetch_categories_ids(self, post_data, requests): """ Get the wordpress category ids associated to the JSON post data. :param post_data: JSON post data retrieved from Wordpress API :param requests: The Wordpress API session :return: list of wp.post.category record ids """ categories_id = [] category_obj = self.env['wp.post.category'] try: category_data = [ d for d in post_data['_links']['wp:term'] if d['taxonomy'] == 'category' ][0] category_json_url = category_data['href'] categories_request = requests.get( category_json_url).json() for c in categories_request: category = category_obj.search([ ('name', '=', c['name'])]) if not category: category = category_obj.create({ 'name': c['name'] }) categories_id.append(category.id) except (IndexError, KeyError): _logger.info('WP Post ID %s has no category.', str(post_data['id'])) return categories_id def update_display_on_hub(self): """ Compute visibility of post based on the visibility of its categories. It will be visible if at least one category is visible. """ for post in self: post.display_on_hub = post.category_ids.filtered('display_on_hub') @api.model def _supported_langs(self): """ Will fetch all wordpress posts in the given langs :return: res.lang recordset """ return self.env['res.lang'].search([('code', '!=', 'en_US')])
agpl-3.0
CCI-MOC/k2k-proxy
mixmatch/services.py
1
6357
# Copyright 2016 Massachusetts Open Cloud # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import os import operator from six.moves.urllib import parse from mixmatch import config CONF = config.CONF def construct_url(service_provider, service_type, version, action, project_id=None): """Construct the full URL for an Openstack API call.""" conf = config.get_conf_for_sp(service_provider) if service_type == 'image': endpoint = conf.image_endpoint return "%(endpoint)s/%(version)s/%(action)s" % { 'endpoint': endpoint, 'version': version, 'action': os.path.join(*action) } elif service_type == 'volume': endpoint = conf.volume_endpoint return "%(endpoint)s/%(version)s/%(project)s/%(action)s" % { 'endpoint': endpoint, 'version': version, 'project': project_id, 'action': os.path.join(*action) } def aggregate(responses, key, params=None, path=None, detailed=True): """Combine responses from several clusters into one response.""" if params: limit = int(params.get('limit', 0)) sort = params.get('sort', None) marker = params.get('marker', None) sort_key = params.get('sort_key', None) sort_dir = params.get('sort_dir', None) if sort and not sort_key: sort_key, sort_dir = sort.split(':') else: sort_key = None limit = 0 marker = None resource_list = [] for location, response in responses.items(): resources = json.loads(response.text) if type(resources) == dict: resource_list += resources[key] start = 0 last = end = len(resource_list) if sort_key: resource_list = sorted(resource_list, 
key=operator.itemgetter(sort_key), reverse=_is_reverse(sort_dir)) if marker: # Find the position of the resource with marker id # and set the list to start at the one after that. for index, item in enumerate(resource_list): if item['id'] == marker: start = index + 1 break if limit != 0: end = start + limit # this hack is to handle GET requests to /volumes # we automatically make the call to /volumes/detail # because we need sorting information. Here we # remove the extra values /volumes/detail provides if key == 'volumes' and not detailed: resource_list[start:end] = \ _remove_details(resource_list[start:end]) response = {key: resource_list[start:end]} # Inject the pagination URIs if start > 0: params.pop('marker', None) response['start'] = '%s?%s' % (path, parse.urlencode(params)) if end < last: params['marker'] = response[key][-1]['id'] response['next'] = '%s?%s' % (path, parse.urlencode(params)) return json.dumps(response) def list_api_versions(service_type, url): api_versions = list() if service_type == 'image': supported_versions = CONF.proxy.image_api_versions for version in supported_versions: info = dict() if version == supported_versions[0]: info.update({'status': 'CURRENT'}) else: info.update({'status': 'SUPPORTED'}) info.update({ 'id': version, 'links': [ {'href': '%s/%s/' % (url, version[:-2]), 'rel': 'self'} ] }) api_versions.append(info) return json.dumps({'versions': api_versions}) elif service_type == 'volume': supported_versions = CONF.proxy.volume_api_versions for version in supported_versions: info = dict() if version == supported_versions[0]: info.update({ 'status': 'CURRENT', 'min_version': version[1:], 'version': version[1:] }) else: info.update({ 'status': 'SUPPORTED', 'min_version': '', 'version': '' }) info.update({ 'id': version, 'updated': '2014-06-28T12:20:21Z', # FIXME 'links': [ {'href': 'http://docs.openstack.org/', 'type': 'text/html', 'rel': 'describedby'}, {'href': '%s/%s/' % (url, version[:-2]), 'rel': 'self'} ], 'media-types': [ 
{'base': 'application/json', 'type': 'application/vnd.openstack.volume+json;version=%s' % version[1:-2]}, {'base': 'application/xml', 'type': 'application/vnd.openstack.volume+xml;version=%s' % version[1:-2]} ] }) api_versions.append(info) return json.dumps({'versions': api_versions}) else: raise ValueError def _is_reverse(order): """Return True if order is asc, False if order is desc""" if order == 'asc': return False elif order == 'desc': return True else: raise ValueError def _remove_details(volumes): """Delete key, value pairs if key is not in keys""" keys = ['id', 'links', 'name'] for i in range(len(volumes)): volumes[i] = {key: volumes[i][key] for key in keys} return volumes
apache-2.0
martinwicke/tensorflow
tensorflow/contrib/session_bundle/gc.py
47
5977
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """System for specifying garbage collection (GC) of path based data. This framework allows for GC of data specified by path names, for example files on disk. gc.Path objects each represent a single item stored at a path and may be a base directory, /tmp/exports/0/... /tmp/exports/1/... ... or a fully qualified file, /tmp/train-1.ckpt /tmp/train-2.ckpt ... A gc filter function takes and returns a list of gc.Path items. Filter functions are responsible for selecting Path items for preservation or deletion. Note that functions should always return a sorted list. 
For example, base_dir = "/tmp" # create the directories for e in xrange(10): os.mkdir("%s/%d" % (base_dir, e), 0o755) # create a simple parser that pulls the export_version from the directory def parser(path): match = re.match("^" + base_dir + "/(\\d+)$", path.path) if not match: return None return path._replace(export_version=int(match.group(1))) path_list = gc.get_paths("/tmp", parser) # contains all ten Paths every_fifth = gc.mod_export_version(5) print every_fifth(path_list) # shows ["/tmp/0", "/tmp/5"] largest_three = gc.largest_export_versions(3) print largest_three(all_paths) # shows ["/tmp/7", "/tmp/8", "/tmp/9"] both = gc.union(every_fifth, largest_three) print both(all_paths) # shows ["/tmp/0", "/tmp/5", # "/tmp/7", "/tmp/8", "/tmp/9"] # delete everything not in 'both' to_delete = gc.negation(both) for p in to_delete(all_paths): gfile.DeleteRecursively(p.path) # deletes: "/tmp/1", "/tmp/2", # "/tmp/3", "/tmp/4", "/tmp/6", """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import heapq import math import os from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.platform import gfile Path = collections.namedtuple('Path', 'path export_version') def largest_export_versions(n): """Creates a filter that keeps the largest n export versions. Args: n: number of versions to keep. Returns: A filter function that keeps the n largest paths. """ def keep(paths): heap = [] for idx, path in enumerate(paths): if path.export_version is not None: heapq.heappush(heap, (path.export_version, idx)) keepers = [paths[i] for _, i in heapq.nlargest(n, heap)] return sorted(keepers) return keep def one_of_every_n_export_versions(n): """Creates a filter that keeps one of every n export versions. Args: n: interval size. Returns: A filter function that keeps exactly one path from each interval [0, n], (n, 2n], (2n, 3n], etc... 
If more than one path exists in an interval the largest is kept. """ def keep(paths): keeper_map = {} # map from interval to largest path seen in that interval for p in paths: if p.export_version is None: # Skip missing export_versions. continue # Find the interval (with a special case to map export_version = 0 to # interval 0. interval = math.floor( (p.export_version - 1) / n) if p.export_version else 0 existing = keeper_map.get(interval, None) if (not existing) or (existing.export_version < p.export_version): keeper_map[interval] = p return sorted(keeper_map.values()) return keep def mod_export_version(n): """Creates a filter that keeps every export that is a multiple of n. Args: n: step size. Returns: A filter function that keeps paths where export_version % n == 0. """ def keep(paths): keepers = [] for p in paths: if p.export_version % n == 0: keepers.append(p) return sorted(keepers) return keep def union(lf, rf): """Creates a filter that keeps the union of two filters. Args: lf: first filter rf: second filter Returns: A filter function that keeps the n largest paths. """ def keep(paths): l = set(lf(paths)) r = set(rf(paths)) return sorted(list(l|r)) return keep def negation(f): """Negate a filter. Args: f: filter function to invert Returns: A filter function that returns the negation of f. """ def keep(paths): l = set(paths) r = set(f(paths)) return sorted(list(l-r)) return keep def get_paths(base_dir, parser): """Gets a list of Paths in a given directory. Args: base_dir: directory. parser: a function which gets the raw Path and can augment it with information such as the export_version, or ignore the path by returning None. An example parser may extract the export version from a path such as "/tmp/exports/100" an another may extract from a full file name such as "/tmp/checkpoint-99.out". Returns: A list of Paths contained in the base directory with the parsing function applied. 
By default the following fields are populated, - Path.path The parsing function is responsible for populating, - Path.export_version """ raw_paths = gfile.ListDirectory(base_dir) paths = [] for r in raw_paths: p = parser(Path(os.path.join(base_dir, r), None)) if p: paths.append(p) return sorted(paths)
apache-2.0
grembo/buildbot
master/buildbot/steps/package/deb/lintian.py
11
3303
# This program is free software; you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Portions Copyright Buildbot Team Members # Portions Copyright Marius Rieder <marius.rieder@durchmesser.ch> """ Steps and objects related to lintian """ from __future__ import absolute_import from __future__ import print_function from buildbot import config from buildbot.process import buildstep from buildbot.process.results import FAILURE from buildbot.process.results import SUCCESS from buildbot.process.results import WARNINGS from buildbot.steps.package import util as pkgutil from buildbot.steps.shell import ShellCommand class MaxQObserver(buildstep.LogLineObserver): def __init__(self): buildstep.LogLineObserver.__init__(self) self.failures = 0 def outLineReceived(self, line): if line.startswith('TEST FAILURE:'): self.failures += 1 class DebLintian(ShellCommand): name = "lintian" description = ["Lintian running"] descriptionDone = ["Lintian"] fileloc = None suppressTags = [] warnCount = 0 errCount = 0 flunkOnFailure = False warnOnFailure = True def __init__(self, fileloc=None, suppressTags=None, **kwargs): """ Create the DebLintian object. @type fileloc: str @param fileloc: Location of the .deb or .changes to test. @type suppressTags: list @param suppressTags: List of tags to suppress. @type kwargs: dict @param kwargs: all other keyword arguments. 
""" ShellCommand.__init__(self, **kwargs) if fileloc: self.fileloc = fileloc if suppressTags: self.suppressTags = suppressTags if not self.fileloc: config.error("You must specify a fileloc") self.command = ["lintian", "-v", self.fileloc] if self.suppressTags: for tag in self.suppressTags: self.command += ['--suppress-tags', tag] self.obs = pkgutil.WEObserver() self.addLogObserver('stdio', self.obs) def createSummary(self, log): """ Create nice summary logs. @param log: log to create summary off of. """ warnings = self.obs.warnings errors = self.obs.errors if warnings: self.addCompleteLog('%d Warnings' % len(warnings), "\n".join(warnings)) self.warnCount = len(warnings) if errors: self.addCompleteLog('%d Errors' % len(errors), "\n".join(errors)) self.errCount = len(errors) def evaluateCommand(self, cmd): if (cmd.rc != 0 or self.errCount): return FAILURE if self.warnCount: return WARNINGS return SUCCESS
gpl-2.0
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-3.3.0/Lib/test/test_importlib/extension/test_finder.py
26
1223
from importlib import machinery from .. import abc from . import util import unittest class FinderTests(abc.FinderTests): """Test the finder for extension modules.""" def find_module(self, fullname): importer = machinery.FileFinder(util.PATH, (machinery.ExtensionFileLoader, machinery.EXTENSION_SUFFIXES)) return importer.find_module(fullname) def test_module(self): self.assertTrue(self.find_module(util.NAME)) def test_package(self): # No extension module as an __init__ available for testing. pass def test_module_in_package(self): # No extension module in a package available for testing. pass def test_package_in_package(self): # No extension module as an __init__ available for testing. pass def test_package_over_module(self): # Extension modules cannot be an __init__ for a package. pass def test_failure(self): self.assertIsNone(self.find_module('asdfjkl;')) def test_main(): from test.support import run_unittest run_unittest(FinderTests) if __name__ == '__main__': test_main()
mit
RCGTDev/votainteligente-portal-electoral
elections/tests/election_tags_based_search_tests.py
4
2452
# coding=utf-8 from elections.tests import VotaInteligenteTestCase as TestCase from django import forms from django.utils.unittest import skip from django.core.urlresolvers import reverse from elections.forms import ElectionSearchByTagsForm from elections.views import ElectionsSearchByTagView from django.views.generic.edit import FormView from django.utils.translation import ugettext as _ from elections.models import Election class ElectionTagsBasedSearchViewTestCase(TestCase): def setUp(self): super(ElectionTagsBasedSearchViewTestCase, self).setUp() #@skip('missing view') def test_get_url(self): url = reverse('tags_search') response = self.client.get(url) self.assertEquals(response.status_code, 200) self.assertTemplateUsed(response, 'search/tags_search.html') self.assertIn('form', response.context) self.assertIsInstance(response.context['form'], forms.Form) self.assertIsInstance(response.context['form'], ElectionSearchByTagsForm) def test_tags_search_view_params(self): view = ElectionsSearchByTagView() self.assertIsInstance(view, FormView) self.assertEquals(view.get_success_url(), reverse('tags_search')) self.assertEquals(view.form_class, ElectionSearchByTagsForm) self.assertEquals(view.template_name, 'search/tags_search.html') def test_get_result(self): url = reverse('tags_search') camina = u'Camiña' data = { 'q':camina } response = self.client.get(url, data=data) self.assertIn('result', response.context) expected_result = Election.objects.filter(tags__name__in=[camina]) self.assertQuerysetEqual(response.context['result'], [repr(r) for r in expected_result]) class ElectionSearchByTagsFormTestCase(TestCase): def setUp(self): super(ElectionSearchByTagsFormTestCase, self).setUp() def test_it_has_a_field_named_q(self): form = ElectionSearchByTagsForm() self.assertIn('q', form.fields) self.assertIsInstance(form.fields['q'], forms.CharField) self.assertFalse(form.fields['q'].required) self.assertEquals(form.fields['q'].label, _('Busca tu comuna')) def 
test_get_result(self): camina = u"Camiña" expected_result = Election.objects.filter(tags__name__in=[camina]) initial = { 'q':camina } form = ElectionSearchByTagsForm(data=initial) #the view does this for me form.full_clean() #the view does this for me search_result = form.get_search_result() self.assertQuerysetEqual(search_result, [repr(r) for r in expected_result])
gpl-3.0
BL-Labs/sample_generator_datatools
sample_generator/sample_generator/settings.py
1
5561
# Django settings for sample_generator project. DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME': '', # Or path to database file if using sqlite3. # The following settings are not used with sqlite3: 'USER': '', 'PASSWORD': '', 'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. 'PORT': '', # Set to empty string for default. } } # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts ALLOWED_HOSTS = [] # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://example.com/media/", "http://media.example.com/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. 
# Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = 'd*%h9-no(sl=ush!0l2n19lfk!-6+sbl531!h%p&e2ox#van01' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'sample_generator.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'sample_generator.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. 
) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Uncomment the next line to enable the admin: 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', 'frontend', ) # SAMPLE GENERATOR VALUES SOLR_CONNECTION = "http://localhost:8983/solr/samplegenerator" SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer' # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } }
mit
ddico/odoo
addons/website/tests/test_controllers.py
1
1702
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import json from odoo import tests from odoo.tools import mute_logger @tests.tagged('post_install', '-at_install') class TestControllers(tests.HttpCase): @mute_logger('odoo.addons.http_routing.models.ir_http', 'odoo.http') def test_last_created_pages_autocompletion(self): self.authenticate("admin", "admin") Page = self.env['website.page'] last_5_url_edited = [] base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url') suggested_links_url = base_url + '/website/get_suggested_links' for i in range(0, 10): new_page = Page.create({ 'name': 'Generic', 'type': 'qweb', 'arch': ''' <div>content</div> ''', 'key': "test.generic_view-%d" % i, 'url': "/generic-%d" % i, 'is_published': True, }) if i % 2 == 0: # mark as old new_page._write({'write_date': '2020-01-01'}) else: last_5_url_edited.append(new_page.url) res = self.opener.post(url=suggested_links_url, json={'params': {'needle': '/'}}) resp = json.loads(res.content) assert 'result' in resp suggested_links = resp['result'] last_modified_history = next(o for o in suggested_links['others'] if o["title"] == "Last modified pages") last_modified_values = map(lambda o: o['value'], last_modified_history['values']) self.assertTrue(set(last_modified_values) == set(last_5_url_edited))
agpl-3.0
Therp/odoo
addons/hw_escpos/escpos/exceptions.py
170
2884
""" ESC/POS Exceptions classes """ import os class Error(Exception): """ Base class for ESC/POS errors """ def __init__(self, msg, status=None): Exception.__init__(self) self.msg = msg self.resultcode = 1 if status is not None: self.resultcode = status def __str__(self): return self.msg # Result/Exit codes # 0 = success # 10 = No Barcode type defined # 20 = Barcode size values are out of range # 30 = Barcode text not supplied # 40 = Image height is too large # 50 = No string supplied to be printed # 60 = Invalid pin to send Cash Drawer pulse class BarcodeTypeError(Error): def __init__(self, msg=""): Error.__init__(self, msg) self.msg = msg self.resultcode = 10 def __str__(self): return "No Barcode type is defined" class BarcodeSizeError(Error): def __init__(self, msg=""): Error.__init__(self, msg) self.msg = msg self.resultcode = 20 def __str__(self): return "Barcode size is out of range" class BarcodeCodeError(Error): def __init__(self, msg=""): Error.__init__(self, msg) self.msg = msg self.resultcode = 30 def __str__(self): return "Code was not supplied" class ImageSizeError(Error): def __init__(self, msg=""): Error.__init__(self, msg) self.msg = msg self.resultcode = 40 def __str__(self): return "Image height is longer than 255px and can't be printed" class TextError(Error): def __init__(self, msg=""): Error.__init__(self, msg) self.msg = msg self.resultcode = 50 def __str__(self): return "Text string must be supplied to the text() method" class CashDrawerError(Error): def __init__(self, msg=""): Error.__init__(self, msg) self.msg = msg self.resultcode = 60 def __str__(self): return "Valid pin must be set to send pulse" class NoStatusError(Error): def __init__(self, msg=""): Error.__init__(self, msg) self.msg = msg self.resultcode = 70 def __str__(self): return "Impossible to get status from the printer" class TicketNotPrinted(Error): def __init__(self, msg=""): Error.__init__(self, msg) self.msg = msg self.resultcode = 80 def __str__(self): return "A part of 
the ticket was not been printed" class NoDeviceError(Error): def __init__(self, msg=""): Error.__init__(self, msg) self.msg = msg self.resultcode = 90 def __str__(self): return "Impossible to find the printer Device" class HandleDeviceError(Error): def __init__(self, msg=""): Error.__init__(self, msg) self.msg = msg self.resultcode = 100 def __str__(self): return "Impossible to handle device"
agpl-3.0
ftranschel/evoMPS
evoMPS/tdvp_uniform.py
1
54221
# -*- coding: utf-8 -*- """ Created on Thu Oct 13 17:29:27 2011 @author: Ashley Milsted TODO: - Clean up CG code: Create nice interface? - Split out excitations stuff? """ import numpy as np import scipy as sp import scipy.linalg as la import scipy.sparse.linalg as las import scipy.optimize as opti import tdvp_common as tm import matmul as m from mps_uniform import EvoMPS_MPS_Uniform from mps_uniform_pinv import pinv_1mE import logging log = logging.getLogger(__name__) class Excite_H_Op: def __init__(self, tdvp, donor, p): """Creates an Excite_H_Op object, which is a LinearOperator. This wraps the effective Hamiltonian in terms of MPS tangent vectors as a LinearOperator that can be used with SciPy's sparse linear algebra routines. Parameters ---------- tdvp : EvoMPS_TDVP_Uniform tdvp object providing the required operations in the matrix representation. donor : EvoMPS_TDVP_Uniform Second tdvp object (can be the same as tdvp), for example containing a different ground state. p : float Momentum in units of inverse lattice spacing. """ self.donor = donor self.p = p self.D = tdvp.D self.q = tdvp.q d = (self.q - 1) * self.D**2 self.shape = (d, d) self.dtype = np.dtype(tdvp.typ) if tdvp.ham_sites == 2: self.prereq = (tdvp.calc_BHB_prereq(donor)) self.calc_BHB = tdvp.calc_BHB else: self.prereq = (tdvp.calc_BHB_prereq_3s(donor)) self.calc_BHB = tdvp.calc_BHB_3s self.calls = 0 self.M_prev = None self.y_pi_prev = None def matvec(self, v): x = v.reshape((self.D, (self.q - 1)*self.D)) self.calls += 1 log.debug("Calls: %u", self.calls) res, self.M_prev, self.y_pi_prev = self.calc_BHB(x, self.p, self.donor, *self.prereq, M_prev=self.M_prev, y_pi_prev=self.y_pi_prev) return res.ravel() class EvoMPS_TDVP_Uniform(EvoMPS_MPS_Uniform): def __init__(self, D, q, ham, ham_sites=None, dtype=None): """Implements the TDVP algorithm for uniform MPS. 
Parameters ---------- D : int The bond-dimension q : int The single-site Hilbert space dimension ham : callable or ndarray Local Hamiltonian term (acting on two or three adjacent sites) ham_sites : int The number of sites acted on non-trivially by ham. Should be specified for callable ham. dtype : numpy dtype = None Specifies the array type. """ self.ham = ham """The local Hamiltonian term. Can be changed, for example, to perform a quench. The number of neighbouring sites acted on must be specified in ham_sites.""" if ham_sites is None: try: self.ham_sites = len(ham.shape) / 2 except AttributeError: #TODO: Try to count arguments using inspect module self.ham_sites = 2 else: self.ham_sites = ham_sites if not (self.ham_sites == 2 or self.ham_sites == 3): raise ValueError("Only 2 or 3 site Hamiltonian terms supported!") self.K_solver = las.bicgstab super(EvoMPS_TDVP_Uniform, self).__init__(D, q, dtype=dtype) self.eta = sp.NaN """The norm of the TDVP tangent vector (projection of the exact time evolution onto the MPS tangent plane. Only available after calling take_step().""" self.h_expect = sp.NaN """The energy density expectation value, available only after calling update() or calc_K().""" def _init_arrays(self, D, q): super(EvoMPS_TDVP_Uniform, self)._init_arrays(D, q) ham_shape = [] for i in xrange(self.ham_sites): ham_shape.append(q) C_shape = tuple(ham_shape + [D, D]) self.C = np.zeros(C_shape, dtype=self.typ, order=self.odr) self.K = np.ones_like(self.A[0]) self.K_left = None def set_ham_array_from_function(self, ham_func): """Generates a Hamiltonian array from a function. Given a function ham_func(s, t, u, v) this generates an array ham[s, t, u, v] (example for self.ham_sites == 2). Using an array instead of a function can significantly speed up parts of the algorithm. Parameters ---------- ham_func : callable Local Hamiltonian term with self.ham_sites * 2 required arguments. 
""" hv = np.vectorize(ham_func, otypes=[np.complex128]) if self.ham_sites == 2: self.ham = np.fromfunction(hv, (self.q, self.q, self.q, self.q)) else: self.ham = np.fromfunction(hv, tuple([self.q] * 6)) def calc_C(self): """Generates the C tensor used to calculate the K and ultimately B. This is called automatically by self.update(). C contains a contraction of the Hamiltonian self.ham with the parameter tensors over the local basis indices. This is prerequisite for calculating the tangent vector parameters B, which optimally approximate the exact time evolution. Makes use only of the nearest-neighbour Hamiltonian, and of the A's. C depends on A. """ if callable(self.ham): ham = np.vectorize(self.ham, otypes=[sp.complex128]) ham = np.fromfunction(ham, tuple(self.C.shape[:-2] * 2)) else: ham = self.ham if self.ham_sites == 2: self.C[:] = tm.calc_C_mat_op_AA(ham, self.AA) else: self.AAA = tm.calc_AAA(self.A, self.A, self.A) self.C[:] = tm.calc_C_3s_mat_op_AAA(ham, self.AAA) def calc_PPinv(self, x, p=0, out=None, left=False, A1=None, A2=None, r=None, pseudo=True, brute_check=False, sc_data='', solver=None): """Uses an iterative method to calculate the result of applying the (pseudo) inverse of (1 - exp(1.j * p) * E) to a vector |x>. Parameters ---------- x : ndarray The matrix representation of the vector |x>. p : float Momentum in units of inverse lattice spacing. out : ndarray Appropriately-sized output matrix. left : bool Whether to act left on |x> (instead of right). A1 : ndarray Ket parameter tensor. A2 : ndarray Bra parameter tensor. r : ndarray Right eigenvector of E corresponding to the largest eigenvalue. pseudo : bool Whether to calculate the pseudo inverse (or just the inverse). brute_check : bool Whether to check the answer using dense methods (scales as D**6!). Returns ------- out : ndarray The result of applying the inverse operator, in matrix form. 
""" if A1 is None: A1 = self.A if A2 is None: A2 = self.A if r is None: r = self.r out = pinv_1mE(x, A1, A2, self.l, r, p=p, left=left, pseudo=pseudo, out=out, tol=self.itr_rtol, solver=solver, sanity_checks=self.sanity_checks, sc_data=sc_data) return out def calc_K(self): """Generates the K matrix used to calculate B. This also updates the energy-density expectation value self.h_expect. This is called automatically by self.update(). K contains the (non-trivial) action of the Hamiltonian on the right half of the infinite chain. It directly depends on A, r, and C. """ if self.ham_sites == 2: Hr = tm.eps_r_op_2s_C12_AA34(self.r, self.C, self.AA) else: Hr = tm.eps_r_op_3s_C123_AAA456(self.r, self.C, self.AAA) self.h_expect = m.adot(self.l, Hr) QHr = Hr - self.r * self.h_expect self.calc_PPinv(QHr, out=self.K, solver=self.K_solver) if self.sanity_checks: Ex = tm.eps_r_noop(self.K, self.A, self.A) QEQ = Ex - self.r * m.adot(self.l, self.K) res = self.K - QEQ if not np.allclose(res, QHr): log.warning("Sanity check failed: Bad K!") log.warning("Off by: %s", la.norm(res - QHr)) def calc_K_l(self): """Generates the left K matrix. See self.calc_K(). K contains the (non-trivial) action of the Hamiltonian on the left half of the infinite chain. It directly depends on A, l, and C. This calculates the "bra-vector" K_l ~ <K_l| (and K_l.conj().T ~ |K_l>) so that <K_l|r> = trace(K_l.dot(r)) Returns ------- K_left : ndarray The left K matrix. h : complex The energy-density expectation value. """ if self.ham_sites == 2: lH = tm.eps_l_op_2s_AA12_C34(self.l, self.AA, self.C) else: lH = tm.eps_l_op_3s_AAA123_C456(self.l, self.AAA, self.C) h = m.adot_noconj(lH, self.r) #=tr(lH r) lHQ = lH - self.l * h #Since A1=A2 and p=0, we get the right result without turning lHQ into a ket. #This is the same as... 
#self.K_left = (self.calc_PPinv(lHQ.conj().T, left=True, out=self.K_left)).conj().T self.K_left = self.calc_PPinv(lHQ, left=True, out=self.K_left, solver=self.K_solver) if self.sanity_checks: xE = tm.eps_l_noop(self.K_left, self.A, self.A) QEQ = xE - self.l * m.adot(self.r, self.K_left) res = self.K_left - QEQ if not np.allclose(res, lHQ): log.warning("Sanity check failed: Bad K_left!") log.warning("Off by: %s", la.norm(res - lHQ)) return self.K_left, h def get_B_from_x(self, x, Vsh, l_sqrt_i, r_sqrt_i, out=None): """Calculates a gauge-fixing B-tensor given parameters x. Parameters ---------- x : ndarray The parameter matrix. Vsh : ndarray Parametrization tensor. l_sqrt_i : ndarray The matrix self.l to the power of -1/2. r_sqrt_i : ndarray The matrix self.r to the power of -1/2. out : ndarray Output tensor of appropriate shape. """ if out is None: out = np.zeros_like(self.A) for s in xrange(self.q): out[s] = l_sqrt_i.dot(x).dot(r_sqrt_i.dot(Vsh[s]).conj().T) return out def calc_l_r_roots(self): """Calculates the (inverse) square roots of self.l and self.r. """ self.l_sqrt, self.l_sqrt_i, self.r_sqrt, self.r_sqrt_i = tm.calc_l_r_roots(self.l, self.r, zero_tol=self.zero_tol, sanity_checks=self.sanity_checks) def calc_B(self, set_eta=True): """Calculates a gauge-fixing tangent-vector parameter tensor capturing the projected infinitesimal time evolution of the state. A TDVP time step is defined as: A -= dtau * B where dtau is an infinitesimal imaginary time step. Parameters ---------- set_eta : bool Whether to set self.eta to the norm of the tangent vector. 
""" self.calc_l_r_roots() self.Vsh = tm.calc_Vsh(self.A, self.r_sqrt, sanity_checks=self.sanity_checks) if self.ham_sites == 2: self.x = tm.calc_x(self.K, self.C, self.C, self.r, self.l, self.A, self.A, self.A, self.l_sqrt, self.l_sqrt_i, self.r_sqrt, self.r_sqrt_i, self.Vsh) else: self.x = tm.calc_x_3s(self.K, self.C, self.C, self.C, self.r, self.r, self.l, self.l, self.A, self.A, self.A, self.A, self.A, self.l_sqrt, self.l_sqrt_i, self.r_sqrt, self.r_sqrt_i, self.Vsh) if set_eta: self.eta = sp.sqrt(m.adot(self.x, self.x)) B = self.get_B_from_x(self.x, self.Vsh, self.l_sqrt_i, self.r_sqrt_i) if self.sanity_checks: #Test gauge-fixing: tst = tm.eps_r_noop(self.r, B, self.A) if not np.allclose(tst, 0): log.warning("Sanity check failed: Gauge-fixing violation! %s" ,la.norm(tst)) return B def update(self, restore_CF=True, auto_truncate=False, restore_CF_after_trunc=True): """Updates secondary quantities to reflect the state parameters self.A. Must be used after taking a step or otherwise changing the parameters self.A before calculating physical quantities or taking the next step. Also (optionally) restores canonical form by calling self.restore_CF(). Parameters ---------- restore_CF : bool (True) Whether to restore canonical form. auto_truncate : bool (True) Whether to automatically truncate the bond-dimension if rank-deficiency is detected. Requires restore_CF. restore_CF_after_trunc : bool (True) Whether to restore_CF after truncation. """ super(EvoMPS_TDVP_Uniform, self).update(restore_CF=restore_CF, auto_truncate=auto_truncate, restore_CF_after_trunc=restore_CF_after_trunc) self.calc_C() self.calc_K() def take_step(self, dtau, B=None): """Performs a complete forward-Euler step of imaginary time dtau. The operation is A -= dtau * B with B from self.calc_B() by default. If dtau is itself imaginary, real-time evolution results. Parameters ---------- dtau : complex The (imaginary or real) amount of imaginary time (tau) to step. 
B : ndarray A custom parameter-space tangent vector to step along. """ if B is None: B = self.calc_B() self.A += -dtau * B def take_step_RK4(self, dtau, B_i=None): """Take a step using the fourth-order explicit Runge-Kutta method. This requires more memory than a simple forward Euler step. It is, however, far more accurate with a per-step error of order dtau**5. Parameters ---------- dtau : complex The (imaginary or real) amount of imaginary time (tau) to step. B_i : ndarray B calculated using self.calc_B() (if known, to avoid calculating it again). """ def update(): self.calc_lr() self.calc_AA() self.calc_C() self.calc_K() A0 = self.A.copy() B_fin = np.empty_like(self.A) if not B_i is None: B = B_i else: B = self.calc_B() #k1 B_fin = B self.A = A0 - dtau/2 * B update() B = self.calc_B(set_eta=False) #k2 self.A = A0 - dtau/2 * B B_fin += 2 * B update() B = self.calc_B(set_eta=False) #k3 self.A = A0 - dtau * B B_fin += 2 * B update() B = self.calc_B(set_eta=False) #k4 B_fin += B self.A = A0 - dtau /6 * B_fin def calc_BHB_prereq(self, donor): """Calculates prerequisites for the application of the effective Hamiltonian in terms of tangent vectors. This is called (indirectly) by the self.excite.. functions. Parameters ---------- donor: EvoMPS_TDVP_Uniform Second state (may be the same, or another ground state). Returns ------- A lot of stuff. 
""" l = self.l r_ = donor.r r__sqrt = donor.r_sqrt r__sqrt_i = donor.r_sqrt_i A = self.A A_ = donor.A AA_ = donor.AA eyed = np.eye(self.q**self.ham_sites) eyed = eyed.reshape(tuple([self.q] * self.ham_sites * 2)) ham_ = self.ham - self.h_expect.real * eyed V_ = sp.transpose(donor.Vsh, axes=(0, 2, 1)).conj() Vri_ = sp.zeros_like(V_) try: for s in xrange(donor.q): Vri_[s] = r__sqrt_i.dot_left(V_[s]) except AttributeError: for s in xrange(donor.q): Vri_[s] = V_[s].dot(r__sqrt_i) Vr_ = sp.zeros_like(V_) try: for s in xrange(donor.q): Vr_[s] = r__sqrt.dot_left(V_[s]) except AttributeError: for s in xrange(donor.q): Vr_[s] = V_[s].dot(r__sqrt) _C_AhlA = np.empty_like(self.C) for u in xrange(self.q): for s in xrange(self.q): _C_AhlA[u, s] = A[u].conj().T.dot(l.dot(A[s])) C_AhlA = sp.tensordot(ham_, _C_AhlA, ((0, 2), (0, 1))) _C_A_Vrh_ = tm.calc_AA(A_, sp.transpose(Vr_, axes=(0, 2, 1)).conj()) C_A_Vrh_ = sp.tensordot(ham_, _C_A_Vrh_, ((3, 1), (0, 1))) C_Vri_A_conj = tm.calc_C_conj_mat_op_AA(ham_, tm.calc_AA(Vri_, A_)) C_ = tm.calc_C_mat_op_AA(ham_, AA_) C_conj = tm.calc_C_conj_mat_op_AA(ham_, AA_) rhs10 = tm.eps_r_op_2s_AA12_C34(r_, AA_, C_Vri_A_conj) return ham_, C_, C_conj, V_, Vr_, Vri_, C_Vri_A_conj, C_AhlA, C_A_Vrh_, rhs10 def calc_BHB(self, x, p, donor, ham_, C_, C_conj, V_, Vr_, Vri_, C_Vri_A_conj, C_AhlA, C_A_Vrh_, rhs10, M_prev=None, y_pi_prev=None, pinv_solver=None): """Calculates the result of applying the effective Hamiltonian in terms of tangent vectors to a particular tangent vector specified by x. Note: For a good approx. ground state, H should be Hermitian pos. semi-def. Parameters ---------- x : ndarray The tangent vector parameters according to the gauge-fixing parametrization. p : float Momentum in units of inverse lattice spacing. donor: EvoMPS_TDVP_Uniform Second state (may be the same, or another ground state). ...others... Prerequisites returned by self.calc_BHB_prereq(). 
""" if pinv_solver is None: pinv_solver = las.gmres A = self.A A_ = donor.A l = self.l r_ = donor.r l_sqrt = self.l_sqrt l_sqrt_i = self.l_sqrt_i r__sqrt = donor.r_sqrt r__sqrt_i = donor.r_sqrt_i K__r = donor.K K_l = self.K_left #this is the 'bra' vector already pseudo = donor is self B = donor.get_B_from_x(x, donor.Vsh, l_sqrt_i, r__sqrt_i) #Skip zeros due to rank-deficiency if la.norm(B) == 0: return sp.zeros_like(x), M_prev, y_pi_prev if self.sanity_checks: tst = tm.eps_r_noop(r_, B, A_) if not np.allclose(tst, 0): log.warning("Sanity check failed: Gauge-fixing violation! %s", la.norm(tst)) if self.sanity_checks: B2 = np.zeros_like(B) for s in xrange(self.q): B2[s] = l_sqrt_i.dot(x.dot(Vri_[s])) if not sp.allclose(B, B2, rtol=self.itr_rtol*self.check_fac, atol=self.itr_atol*self.check_fac): log.warning("Sanity Fail in calc_BHB! Bad Vri!") BA_ = tm.calc_AA(B, A_) AB = tm.calc_AA(self.A, B) y = tm.eps_l_noop(l, B, self.A) M = self.calc_PPinv(y, p=-p, left=True, A1=A_, pseudo=pseudo, sc_data='M', out=M_prev, solver=pinv_solver) if self.sanity_checks: y2 = M - sp.exp(+1.j * p) * tm.eps_l_noop(M, A_, self.A) #(1 - exp(pj) EA_A |M>) if not sp.allclose(y, y2, rtol=1E-10, atol=1E-12): norm = la.norm(y.ravel()) if norm == 0: norm = 1 log.warning("Sanity Fail in calc_BHB! Bad M. 
Off by: %g", (la.norm((y - y2).ravel()) / norm)) if pseudo: M = M - l * m.adot(r_, M) Mh = m.H(M) res = l_sqrt.dot( tm.eps_r_op_2s_AA12_C34(r_, BA_, C_Vri_A_conj) #1 OK + sp.exp(+1.j * p) * tm.eps_r_op_2s_AA12_C34(r_, AB, C_Vri_A_conj) #3 OK with 4 ) #res.fill(0) res += sp.exp(-1.j * p) * l_sqrt_i.dot(Mh.dot(rhs10)) #10 exp = sp.exp subres = sp.zeros_like(res) eye = m.eyemat(C_.shape[2], dtype=self.typ) for s in xrange(self.q): #subres += C_AhlA[s, t].dot(B[s]).dot(Vr_[t].conj().T) #2 OK subres += tm.eps_r_noop(B[s], C_AhlA[:, s], Vr_) #+ exp(-1.j * p) * A[t].conj().T.dot(l.dot(B[s])).dot(C_A_Vrh_[s, t]) #4 OK with 3 subres += exp(-1.j * p) * tm.eps_l_noop(l.dot(B[s]), A, C_A_Vrh_[:, s]) #+ exp(-2.j * p) * A[s].conj().T.dot(Mh.dot(C_[s, t])).dot(Vr_[t].conj().T)) #12 subres += exp(-2.j * p) * A[s].conj().T.dot(Mh).dot(tm.eps_r_noop(eye, C_[s], Vr_)) res += l_sqrt_i.dot(subres) res += l_sqrt.dot(tm.eps_r_noop(K__r, B, Vri_)) #5 OK res += l_sqrt_i.dot(K_l.dot(tm.eps_r_noop(r__sqrt, B, V_))) #6 res += sp.exp(-1.j * p) * l_sqrt_i.dot(Mh.dot(tm.eps_r_noop(K__r, A_, Vri_))) #8 y1 = sp.exp(+1.j * p) * tm.eps_r_noop(K__r, B, A_) #7 y2 = sp.exp(+1.j * p) * tm.eps_r_op_2s_AA12_C34(r_, BA_, C_conj) #9 y3 = sp.exp(+2.j * p) * tm.eps_r_op_2s_AA12_C34(r_, AB, C_conj) #11 y = y1 + y2 + y3 if pseudo: y = y - m.adot(l, y) * r_ y_pi = self.calc_PPinv(y, p=p, A2=A_, pseudo=pseudo, sc_data='y_pi', out=y_pi_prev, solver=pinv_solver) #print m.adot(l, y_pi) if self.sanity_checks: y2 = y_pi - sp.exp(+1.j * p) * tm.eps_r_noop(y_pi, self.A, A_) if not sp.allclose(y, y2, rtol=1E-10, atol=1E-12): log.warning("Sanity Fail in calc_BHB! Bad y_pi. Off by: %g", la.norm((y - y2).ravel()) / la.norm(y.ravel())) if pseudo: y_pi = y_pi - m.adot(l, y_pi) * r_ res += l_sqrt.dot(tm.eps_r_noop(y_pi, self.A, Vri_)) if self.sanity_checks: expval = m.adot(x, res) / m.adot(x, x) #print "expval = " + str(expval) if expval < 0: log.warning("Sanity Fail in calc_BHB! H is not pos. 
semi-definite (%s)", expval) if not abs(expval.imag) < 1E-9: log.warning("Sanity Fail in calc_BHB! H is not Hermitian (%s)", expval) return res, M, y_pi def calc_BHB_prereq_3s(self, donor): """As for self.calc_BHB_prereq(), but for Hamiltonian terms acting on three sites. """ l = self.l r_ = donor.r r__sqrt = donor.r_sqrt r__sqrt_i = donor.r_sqrt_i A = self.A AA = self.AA A_ = donor.A AA_ = donor.AA AAA_ = donor.AAA eyed = np.eye(self.q**self.ham_sites) eyed = eyed.reshape(tuple([self.q] * self.ham_sites * 2)) ham_ = self.ham - self.h_expect.real * eyed V_ = sp.zeros((donor.Vsh.shape[0], donor.Vsh.shape[2], donor.Vsh.shape[1]), dtype=self.typ) for s in xrange(donor.q): V_[s] = m.H(donor.Vsh[s]) Vri_ = sp.zeros_like(V_) try: for s in xrange(donor.q): Vri_[s] = r__sqrt_i.dot_left(V_[s]) except AttributeError: for s in xrange(donor.q): Vri_[s] = V_[s].dot(r__sqrt_i) Vr_ = sp.zeros_like(V_) try: for s in xrange(donor.q): Vr_[s] = r__sqrt.dot_left(V_[s]) except AttributeError: for s in xrange(donor.q): Vr_[s] = V_[s].dot(r__sqrt) C_Vri_AA_ = np.empty((self.q, self.q, self.q, Vri_.shape[1], A_.shape[2]), dtype=self.typ) for s in xrange(self.q): for t in xrange(self.q): for u in xrange(self.q): C_Vri_AA_[s, t, u] = Vri_[s].dot(AA_[t, u]) C_Vri_AA_ = sp.tensordot(ham_, C_Vri_AA_, ((3, 4, 5), (0, 1, 2))) C_AAA_r_Ah_Vrih = np.empty((self.q, self.q, self.q, self.q, self.q, #FIXME: could be too memory-intensive A_.shape[1], Vri_.shape[1]), dtype=self.typ) for s in xrange(self.q): for t in xrange(self.q): for u in xrange(self.q): for k in xrange(self.q): for j in xrange(self.q): C_AAA_r_Ah_Vrih[s, t, u, k, j] = AAA_[s, t, u].dot(r_.dot(A_[k].conj().T)).dot(Vri_[j].conj().T) C_AAA_r_Ah_Vrih = sp.tensordot(ham_, C_AAA_r_Ah_Vrih, ((3, 4, 5, 2, 1), (0, 1, 2, 3, 4))) C_AhAhlAA = np.empty((self.q, self.q, self.q, self.q, A_.shape[2], A.shape[2]), dtype=self.typ) for t in xrange(self.q): for j in xrange(self.q): for i in xrange(self.q): for s in xrange(self.q): C_AhAhlAA[t, j, i, s] = 
AA[i, j].conj().T.dot(l.dot(AA[s, t])) C_AhAhlAA = sp.tensordot(ham_, C_AhAhlAA, ((4, 1, 0, 3), (0, 1, 2, 3))) C_AA_r_Ah_Vrih_ = np.empty((self.q, self.q, self.q, self.q, A_.shape[1], Vri_.shape[1]), dtype=self.typ) for t in xrange(self.q): for u in xrange(self.q): for k in xrange(self.q): for j in xrange(self.q): C_AA_r_Ah_Vrih_[t, u, k, j] = AA_[t, u].dot(r_.dot(A_[k].conj().T)).dot(Vri_[j].conj().T) C_AA_r_Ah_Vrih_ = sp.tensordot(ham_, C_AA_r_Ah_Vrih_, ((4, 5, 2, 1), (0, 1, 2, 3))) C_AAA_Vrh_ = np.empty((self.q, self.q, self.q, self.q, A_.shape[1], Vri_.shape[1]), dtype=self.typ) for s in xrange(self.q): for t in xrange(self.q): for u in xrange(self.q): for k in xrange(self.q): C_AAA_Vrh_[s, t, u, k] = AAA_[s, t, u].dot(Vr_[k].conj().T) C_AAA_Vrh_ = sp.tensordot(ham_, C_AAA_Vrh_, ((3, 4, 5, 2), (0, 1, 2, 3))) C_A_r_Ah_Vrih = np.empty((self.q, self.q, self.q, A_.shape[2], Vri_.shape[1]), dtype=self.typ) for u in xrange(self.q): for k in xrange(self.q): for j in xrange(self.q): C_A_r_Ah_Vrih[u, k, j] = A_[u].dot(r_.dot(A_[k].conj().T)).dot(Vri_[j].conj().T) C_A_r_Ah_Vrih = sp.tensordot(ham_, C_A_r_Ah_Vrih, ((5, 2, 1), (0, 1, 2))) C_AhlAA = np.empty((self.q, self.q, self.q, A_.shape[2], A.shape[2]), dtype=self.typ) for s in xrange(self.q): for t in xrange(self.q): for i in xrange(self.q): C_AhlAA[s, t, i] = A[i].conj().T.dot(l.dot(AA[s, t])) C_AhlAA = sp.tensordot(ham_, C_AhlAA, ((3, 4, 0), (0, 1, 2))) C_AhAhlA = np.empty((self.q, self.q, self.q, A_.shape[2], A.shape[2]), dtype=self.typ) for j in xrange(self.q): for i in xrange(self.q): for s in xrange(self.q): C_AhAhlA[j, i, s] = AA[i, j].conj().T.dot(l.dot(A[s])) C_AhAhlA = sp.tensordot(ham_, C_AhAhlA, ((1, 0, 3), (0, 1, 2))) C_AA_Vrh = np.empty((self.q, self.q, self.q, A_.shape[2], Vr_.shape[1]), dtype=self.typ) for t in xrange(self.q): for u in xrange(self.q): for k in xrange(self.q): C_AA_Vrh[t, u, k] = AA_[t, u].dot(Vr_[k].conj().T) C_AA_Vrh = sp.tensordot(ham_, C_AA_Vrh, ((4, 5, 2), (0, 1, 2))) C_ = 
sp.tensordot(ham_, AAA_, ((3, 4, 5), (0, 1, 2))) rhs10 = tm.eps_r_op_3s_C123_AAA456(r_, AAA_, C_Vri_AA_) #NOTE: These C's are good as C12 or C34, but only because h is Hermitian! #TODO: Make this consistent with the updated 2-site case above. return V_, Vr_, Vri_, C_, C_Vri_AA_, C_AAA_r_Ah_Vrih, C_AhAhlAA, C_AA_r_Ah_Vrih_, C_AAA_Vrh_, C_A_r_Ah_Vrih, C_AhlAA, C_AhAhlA, C_AA_Vrh, rhs10, def calc_BHB_3s(self, x, p, donor, V_, Vr_, Vri_, C_, C_Vri_AA_, C_AAA_r_Ah_Vrih, C_AhAhlAA, C_AA_r_Ah_Vrih_, C_AAA_Vrh_, C_A_r_Ah_Vrih, C_AhlAA, C_AhAhlA, C_AA_Vrh, rhs10, M_prev=None, y_pi_prev=None, pinv_solver=None): """As for self.calc_BHB(), but for Hamiltonian terms acting on three sites. """ if pinv_solver is None: pinv_solver = las.gmres A = self.A A_ = donor.A l = self.l r_ = donor.r l_sqrt = self.l_sqrt l_sqrt_i = self.l_sqrt_i r__sqrt = donor.r_sqrt r__sqrt_i = donor.r_sqrt_i K__r = donor.K K_l = self.K_left pseudo = donor is self B = donor.get_B_from_x(x, donor.Vsh, l_sqrt_i, r__sqrt_i) if self.sanity_checks: tst = tm.eps_r_noop(r_, B, A_) if not np.allclose(tst, 0): log.warning("Sanity check failed: Gauge-fixing violation!") if self.sanity_checks: B2 = np.zeros_like(B) for s in xrange(self.q): B2[s] = l_sqrt_i.dot(x.dot(Vri_[s])) if not sp.allclose(B, B2, rtol=self.itr_rtol*self.check_fac, atol=self.itr_atol*self.check_fac): log.warning("Sanity Fail in calc_BHB! Bad Vri!") BAA_ = tm.calc_AAA(B, A_, A_) ABA_ = tm.calc_AAA(A, B, A_) AAB = tm.calc_AAA(A, A, B) y = tm.eps_l_noop(l, B, self.A) if pseudo: y = y - m.adot(r_, y) * l #should just = y due to gauge-fixing M = self.calc_PPinv(y, p=-p, left=True, A1=A_, r=r_, pseudo=pseudo, out=M_prev, solver=pinv_solver) #print m.adot(r, M) if self.sanity_checks: y2 = M - sp.exp(+1.j * p) * tm.eps_l_noop(M, A_, self.A) if not sp.allclose(y, y2): log.warning("Sanity Fail in calc_BHB! Bad M. 
Off by: %g", (la.norm((y - y2).ravel()) / la.norm(y.ravel()))) Mh = m.H(M) res = l_sqrt.dot( tm.eps_r_op_3s_C123_AAA456(r_, BAA_, C_Vri_AA_) #1 1D + sp.exp(+1.j * p) * tm.eps_r_op_3s_C123_AAA456(r_, ABA_, C_Vri_AA_) #3 + sp.exp(+2.j * p) * tm.eps_r_op_3s_C123_AAA456(r_, AAB, C_Vri_AA_) #3c ) #res.fill(0) res += sp.exp(-1.j * p) * l_sqrt_i.dot(Mh.dot(rhs10)) #10 exp = sp.exp subres = sp.zeros_like(res) for s in xrange(self.q): subres += exp(-2.j * p) * A[s].conj().T.dot(Mh.dot(C_AAA_r_Ah_Vrih[s])) #12 for t in xrange(self.q): subres += (C_AhAhlAA[t, s].dot(B[s]).dot(Vr_[t].conj().T)) #2b subres += (exp(-1.j * p) * A[s].conj().T.dot(l.dot(B[t])).dot(C_AA_r_Ah_Vrih_[s, t])) #4 subres += (exp(-3.j * p) * A[s].conj().T.dot(A[t].conj().T).dot(Mh).dot(C_AAA_Vrh_[t, s])) #12b for u in xrange(self.q): subres += (A[s].conj().T.dot(l.dot(A[t]).dot(B[u])).dot(C_A_r_Ah_Vrih[s, t, u])) #2 -ive of that it should be.... subres += (exp(+1.j * p) * C_AhlAA[u, t, s].dot(B[s]).dot(r_.dot(A_[t].conj().T)).dot(Vri_[u].conj().T)) #3b subres += (exp(-1.j * p) * C_AhAhlA[s, t, u].dot(B[t]).dot(A_[u]).dot(Vr_[s].conj().T)) #4b subres += (exp(-2.j * p) * A[s].conj().T.dot(A[t].conj().T).dot(l.dot(B[u])).dot(C_AA_Vrh[t, s, u])) #4c res += l_sqrt_i.dot(subres) res += l_sqrt.dot(tm.eps_r_noop(K__r, B, Vri_)) #5 res += l_sqrt_i.dot(K_l.dot(tm.eps_r_noop(r__sqrt, B, V_))) #6 res += sp.exp(-1.j * p) * l_sqrt_i.dot(Mh.dot(tm.eps_r_noop(K__r, A_, Vri_))) #8 y1 = sp.exp(+1.j * p) * tm.eps_r_noop(K__r, B, A_) #7 y2 = sp.exp(+1.j * p) * tm.eps_r_op_3s_C123_AAA456(r_, BAA_, C_) #9 y3 = sp.exp(+2.j * p) * tm.eps_r_op_3s_C123_AAA456(r_, ABA_, C_) #11 y4 = sp.exp(+3.j * p) * tm.eps_r_op_3s_C123_AAA456(r_, AAB, C_) #11b y = y1 + y2 + y3 + y4 if pseudo: y = y - m.adot(l, y) * r_ y_pi = self.calc_PPinv(y, p=p, A2=A_, r=r_, pseudo=pseudo, out=y_pi_prev, solver=pinv_solver) #print m.adot(l, y_pi) if self.sanity_checks: y2 = y_pi - sp.exp(+1.j * p) * tm.eps_r_noop(y_pi, self.A, A_) if not sp.allclose(y, y2): 
log.warning("Sanity Fail in calc_BHB! Bad x_pi. Off by: %g", (la.norm((y - y2).ravel()) / la.norm(y.ravel()))) res += l_sqrt.dot(tm.eps_r_noop(y_pi, self.A, Vri_)) if self.sanity_checks: expval = m.adot(x, res) / m.adot(x, x) #print "expval = " + str(expval) if expval < 0: log.warning("Sanity Fail in calc_BHB! H is not pos. semi-definite (%s)", expval) if not abs(expval.imag) < 1E-9: log.warning("Sanity Fail in calc_BHB! H is not Hermitian (%s)", expval) return res, M, y_pi def _prepare_excite_op_top_triv(self, p): if callable(self.ham): self.set_ham_array_from_function(self.ham) self.calc_K_l() self.calc_l_r_roots() self.Vsh = tm.calc_Vsh(self.A, self.r_sqrt, sanity_checks=self.sanity_checks) op = Excite_H_Op(self, self, p) return op def excite_top_triv(self, p, k=6, tol=0, max_itr=None, v0=None, ncv=None, sigma=None, which='SM', return_eigenvectors=False): """Calculates approximate eigenvectors and eigenvalues of the Hamiltonian using tangent vectors of the current state as ansatz states. This is best used with an approximate ground state to find approximate excitation energies. This uses topologically trivial ansatz states. Given a ground state degeneracy, topologically non-trivial low-lying eigenstates (such as kinks or solitons) may also exist. See self.excite_top_nontriv(). Many of the parameters are passed on to scipy.sparse.linalg.eigsh(). Parameters ---------- p : float Momentum in units of inverse lattice spacing. k : int Number of eigenvalues to calculate. tol : float Tolerance (defaults to machine precision). max_itr : int Maximum number of iterations. v0 : ndarray Starting vector. ncv : int Number of Arnoldi vectors to store. sigma : float Eigenvalue shift to use. which : string Which eigenvalues to find ('SM' means the k smallest). return_eigenvectors : bool Whether to return eigenvectors as well as eigenvalues. Returns ------- ev : ndarray List of eigenvalues. eV : ndarray Matrix of eigenvectors (if return_eigenvectors == True). 
""" op = self._prepare_excite_op_top_triv(p) res = las.eigsh(op, which=which, k=k, v0=v0, ncv=ncv, return_eigenvectors=return_eigenvectors, maxiter=max_itr, tol=tol, sigma=sigma) return res def excite_top_triv_brute(self, p, return_eigenvectors=False): op = self._prepare_excite_op_top_triv(p) x = np.empty(((self.q - 1)*self.D**2), dtype=self.typ) H = np.zeros((x.shape[0], x.shape[0]), dtype=self.typ) for i in xrange(x.shape[0]): x.fill(0) x[i] = 1 H[:, i] = op.matvec(x) if not np.allclose(H, H.conj().T): log.warning("Warning! H is not Hermitian! %s", la.norm(H - H.conj().T)) return la.eigh(H, eigvals_only=not return_eigenvectors) def _prepare_excite_op_top_nontriv(self, donor, p): if callable(self.ham): self.set_ham_array_from_function(self.ham) if callable(donor.ham): donor.set_ham_array_from_function(donor.ham) # self.calc_lr() # self.restore_CF() # donor.calc_lr() # donor.restore_CF() self.phase_align(donor) self.update() #donor.update() self.calc_K_l() self.calc_l_r_roots() donor.calc_l_r_roots() donor.Vsh = tm.calc_Vsh(donor.A, donor.r_sqrt, sanity_checks=self.sanity_checks) op = Excite_H_Op(self, donor, p) return op def excite_top_nontriv(self, donor, p, k=6, tol=0, max_itr=None, v0=None, which='SM', return_eigenvectors=False, sigma=None, ncv=None): op = self._prepare_excite_op_top_nontriv(donor, p) res = las.eigsh(op, sigma=sigma, which=which, k=k, v0=v0, return_eigenvectors=return_eigenvectors, maxiter=max_itr, tol=tol, ncv=ncv) return res def excite_top_nontriv_brute(self, donor, p, return_eigenvectors=False): op = self._prepare_excite_op_top_nontriv(donor, p) x = np.empty(((self.q - 1)*self.D**2), dtype=self.typ) H = np.zeros((x.shape[0], x.shape[0]), dtype=self.typ) for i in xrange(x.shape[0]): x.fill(0) x[i] = 1 H[:, i] = op.matvec(x) if not np.allclose(H, H.conj().T): log.warning("Warning! H is not Hermitian! 
%s", la.norm(H - H.conj().T)) return la.eigh(H, eigvals_only=not return_eigenvectors) def find_min_h_brent(self, B, dtau_init, tol=5E-2, skipIfLower=False, verbose=False, use_tangvec_overlap=False, max_iter=20): A0 = self.A.copy() AA0 = self.AA.copy() try: AAA0 = self.AAA.copy() except: AAA0 = None C0 = self.C.copy() K0 = self.K.copy() h_expect_0 = self.h_expect.copy() try: l0 = self.l self.l = self.l.A except: l0 = self.l.copy() pass try: r0 = self.r self.r = self.r.A except: r0 = self.r.copy() pass taus=[0] if use_tangvec_overlap: ress=[self.eta.real] else: ress=[h_expect_0.real] hs=[h_expect_0.real] ls = [self.l_before_CF.copy()] rs = [self.r_before_CF.copy()] Ks = [K0] def f(tau, *args): if tau < 0: if use_tangvec_overlap: res = tau**2 + self.eta.real else: res = tau**2 + h_expect_0.real log.debug((tau, res, "punishing negative tau!")) taus.append(tau) ress.append(res) hs.append(h_expect_0.real) ls.append(l0) rs.append(r0) Ks.append(K0) return res try: i = taus.index(tau) log.debug((tau, ress[i], "from stored")) return ress[i] except ValueError: for s in xrange(self.q): self.A[s] = A0[s] - tau * B[s] if len(taus) > 0: nearest_tau_ind = abs(np.array(taus) - tau).argmin() self.l_before_CF = ls[nearest_tau_ind] #needn't copy these self.r_before_CF = rs[nearest_tau_ind] #self.l_before_CF = l0 #self.r_before_CF = r0 if use_tangvec_overlap: self.K = Ks[nearest_tau_ind].copy() if use_tangvec_overlap: self.update(restore_CF=False) Bg = self.calc_B(set_eta=False) res = abs(m.adot(self.l, tm.eps_r_noop(self.r, Bg, B))) h_exp = self.h_expect.real else: self.calc_lr() if self.ham_sites == 2: self.calc_AA() self.calc_C() if self.ham_sites == 2: h_exp = self.expect_2s(self.ham).real else: h_exp = self.expect_3s(self.ham).real res = h_exp log.debug((tau, res, h_exp, h_exp - h_expect_0.real, self.itr_l, self.itr_r)) taus.append(tau) ress.append(res) hs.append(h_exp) ls.append(self.l.copy()) rs.append(self.r.copy()) if use_tangvec_overlap: Ks.append(self.K.copy()) else: 
Ks.append(None) return res if skipIfLower: if f(dtau_init) < self.h_expect.real: return dtau_init brack_init = (dtau_init * 0.9, dtau_init * 1.5) attempt = 1 while attempt < 3: try: log.debug("CG: Bracketing...") xa, xb, xc, fa, fb, fc, funcalls = opti.bracket(f, xa=brack_init[0], xb=brack_init[1], maxiter=5) brack = (xa, xb, xc) log.debug("CG: Using bracket = " + str(brack)) break except RuntimeError: log.debug("CG: Bracketing failed, attempt %u." % attempt) brack_init = (brack_init[0] * 0.1, brack_init[1] * 0.1) attempt += 1 if attempt == 3: log.debug("CG: Bracketing failed. Aborting!") tau_opt = 0 h_min = h_expect_0.real self.l_before_CF = l0 self.r_before_CF = r0 else: try: tau_opt, res_min, itr, calls = opti.brent(f, brack=brack, tol=tol, maxiter=max_iter, full_output=True) #hopefully optimize next calc_lr nearest_tau_ind = abs(np.array(taus) - tau_opt).argmin() self.l_before_CF = ls[nearest_tau_ind] self.r_before_CF = rs[nearest_tau_ind] i = taus.index(tau_opt) h_min = hs[i] except ValueError: log.debug("CG: Bad bracket. 
Aborting!") tau_opt = 0 h_min = h_expect_0.real self.l_before_CF = l0 self.r_before_CF = r0 #Must restore everything needed for take_step self.A = A0 self.l = l0 self.r = r0 self.AA = AA0 self.AAA = AAA0 self.C = C0 self.K = K0 self.h_expect = h_expect_0 return tau_opt, h_min def step_reduces_h(self, B, dtau): A0 = self.A.copy() AA0 = self.AA.copy() C0 = self.C.copy() try: l0 = self.l self.l = self.l.A except: l0 = self.l.copy() pass try: r0 = self.r self.r = self.r.A except: r0 = self.r.copy() pass for s in xrange(self.q): self.A[s] = A0[s] - dtau * B[s] self.calc_lr() self.calc_AA() self.calc_C() if self.ham_sites == 2: h = self.expect_2s(self.ham) else: h = self.expect_3s(self.ham) #Must restore everything needed for take_step self.A = A0 self.l = l0 self.r = r0 self.AA = AA0 self.C = C0 return h.real < self.h_expect.real, h def calc_B_CG(self, B_CG_0, eta_0, dtau_init, reset=False, verbose=False, switch_threshold_eta=1E-6): """Calculates a tangent vector using the non-linear conjugate gradient method. Parameters: B_CG_0 : ndarray Tangent vector used to make the previous step. Ignored on reset. eta_0 : float Norm of the previous tangent vector. dtau_init : float Initial step-size for the line-search. reset : bool = False Whether to perform a reset, using the gradient as the next search direction. switch_threshold_eta : float Sets the state tolerance (eta) below which the gradient should be used to determine the energetic minimum in a given direction, rather of the value of the energy. The gradient method is more expensive, but is much more robust for small . """ B = self.calc_B() eta = self.eta if reset: beta = 0. log.debug("CG RESET") B_CG = B else: beta = (eta**2) / eta_0**2 log.debug("BetaFR = %s", beta) beta = max(0, beta.real) B_CG = B + beta * B_CG_0 lb0 = self.l_before_CF.copy() rb0 = self.r_before_CF.copy() h_expect = self.h_expect.real.copy() eta_low = eta < switch_threshold_eta #Energy differences become too small here... 
log.debug("CG low eta: " + str(eta_low)) tau, h_min = self.find_min_h_brent(B_CG, dtau_init, verbose=verbose, use_tangvec_overlap=eta_low) if tau == 0: log.debug("CG RESET!") B_CG = B elif not eta_low and h_min > h_expect: log.debug("CG RESET due to energy rise!") B_CG = B self.l_before_CF = lb0 self.r_before_CF = rb0 tau, h_min = self.find_min_h_brent(B_CG, dtau_init * 0.1, use_tangvec_overlap=False) if h_expect < h_min: log.debug("CG RESET FAILED: Setting tau=0!") self.l_before_CF = lb0 self.r_before_CF = rb0 tau = 0 return B_CG, B, eta, tau def export_state(self, userdata=None): if userdata is None: userdata = self.userdata l = np.asarray(self.l) r = np.asarray(self.r) tosave = np.empty((5), dtype=np.ndarray) tosave[0] = self.A tosave[1] = l tosave[2] = r tosave[3] = self.K tosave[4] = np.asarray(userdata) return tosave def save_state(self, file, userdata=None): np.save(file, self.export_state(userdata)) def import_state(self, state, expand=False, truncate=False, expand_q=False, shrink_q=False, refac=0.1, imfac=0.1): newA = state[0] newl = state[1] newr = state[2] newK = state[3] if state.shape[0] > 4: self.userdata = state[4] if (newA.shape == self.A.shape): self.A[:] = newA self.K[:] = newK self.l = np.asarray(newl) self.r = np.asarray(newr) self.l_before_CF = self.l self.r_before_CF = self.r return True elif expand and (len(newA.shape) == 3) and (newA.shape[0] == self.A.shape[0]) and (newA.shape[1] == newA.shape[2]) and (newA.shape[1] <= self.A.shape[1]): newD = self.D savedD = newA.shape[1] self._init_arrays(savedD, self.q) self.A[:] = newA self.l = newl self.r = newr self.K[:] = newK self.expand_D(newD, refac, imfac) self.l_before_CF = self.l self.r_before_CF = self.r log.warning("EXPANDED!") elif truncate and (len(newA.shape) == 3) \ and (newA.shape[0] == self.A.shape[0]) \ and (newA.shape[1] == newA.shape[2]) \ and (newA.shape[1] >= self.A.shape[1]): newD = self.D savedD = newA.shape[1] self._init_arrays(savedD, self.q) self.A[:] = newA self.l = newl 
self.r = newr self.K[:] = newK self.update() # to make absolutely sure we're in CF self.truncate(newD, update=True) log.warning("TRUNCATED!") elif expand_q and (len(newA.shape) == 3) and (newA.shape[0] <= self.A.shape[0]) and (newA.shape[1] == newA.shape[2]) and (newA.shape[1] == self.A.shape[1]): newQ = self.q savedQ = newA.shape[0] self._init_arrays(self.D, savedQ) self.A[:] = newA self.l = newl self.r = newr self.K[:] = newK self.expand_q(newQ) self.l_before_CF = self.l self.r_before_CF = self.r log.warning("EXPANDED in q!") elif shrink_q and (len(newA.shape) == 3) and (newA.shape[0] >= self.A.shape[0]) and (newA.shape[1] == newA.shape[2]) and (newA.shape[1] == self.A.shape[1]): newQ = self.q savedQ = newA.shape[0] self._init_arrays(self.D, savedQ) self.A[:] = newA self.l = newl self.r = newr self.K[:] = newK self.shrink_q(newQ) self.l_before_CF = self.l self.r_before_CF = self.r log.warning("SHRUNK in q!") else: return False def load_state(self, file, expand=False, truncate=False, expand_q=False, shrink_q=False, refac=0.1, imfac=0.1): state = np.load(file) return self.import_state(state, expand=expand, truncate=truncate, expand_q=expand_q, shrink_q=shrink_q, refac=refac, imfac=imfac) def set_q(self, newq): oldK = self.K super(EvoMPS_TDVP_Uniform, self).set_q(newq) self.K = oldK def expand_D(self, newD, refac=100, imfac=0): oldK = self.K oldD = self.D super(EvoMPS_TDVP_Uniform, self).expand_D(newD, refac=refac, imfac=imfac) #self._init_arrays(newD, self.q) self.K[:oldD, :oldD] = oldK self.K[oldD:, :oldD].fill(la.norm(oldK) / oldD**2) self.K[:oldD, oldD:].fill(la.norm(oldK) / oldD**2) self.K[oldD:, oldD:].fill(la.norm(oldK) / oldD**2) def expect_2s(self, op): if op is self.ham and self.ham_sites == 2: res = tm.eps_r_op_2s_C12_AA34(self.r, self.C, self.AA) return m.adot(self.l, res) else: return super(EvoMPS_TDVP_Uniform, self).expect_2s(op) def expect_3s(self, op): if op is self.ham and self.ham_sites == 3: res = tm.eps_r_op_3s_C123_AAA456(self.r, self.C, 
self.AAA) return m.adot(self.l, res) else: return super(EvoMPS_TDVP_Uniform, self).expect_3s(op)
bsd-3-clause
actmd/Diamond
src/collectors/rabbitmq/test/testrabbitmq.py
10
7825
#!/usr/bin/python # coding=utf-8 ########################################################################## from test import CollectorTestCase from test import get_collector_config from test import unittest from mock import Mock from mock import patch from diamond.collector import Collector from rabbitmq import RabbitMQCollector ########################################################################## class TestRabbitMQCollector(CollectorTestCase): def setUp(self): config = get_collector_config('RabbitMQCollector', { 'host': 'localhost:55672', 'user': 'guest', 'password': 'password', 'queues_ignored': '^ignored', 'cluster': True, }) self.collector = RabbitMQCollector(config, None) def test_import(self): self.assertTrue(RabbitMQCollector) @patch('rabbitmq.RabbitMQClient') @patch.object(Collector, 'publish') def test_should_publish_nested_keys(self, publish_mock, client_mock): client = Mock() queue_data = [{ 'more_keys': {'nested_key': 1}, 'key': 2, 'string': 'str', 'name': 'test_queue' }, { 'name': 'ignored', 'more_keys': {'nested_key': 1}, 'key': 2, 'string': 'str', }] overview_data = { 'node': 'rabbit@localhost', 'more_keys': {'nested_key': 3}, 'key': 4, 'string': 'string', } node_health = { 'fd_used': 1, 'fd_total': 2, 'mem_used': 2, 'mem_limit': 4, 'sockets_used': 1, 'sockets_total': 2, 'disk_free_limit': 1, 'disk_free': 1, 'proc_used': 1, 'proc_total': 1, 'partitions': [], } client_mock.return_value = client client.get_queues.return_value = queue_data client.get_overview.return_value = overview_data client.get_nodes.return_value = [1, 2, 3] client.get_node.return_value = node_health self.collector.collect() client.get_queues.assert_called_once_with(None) client.get_nodes.assert_called_once_with() client.get_node.assert_called_once_with('rabbit@localhost') metrics = { 'queues.test_queue.more_keys.nested_key': 1, 'queues.test_queue.key': 2, 'more_keys.nested_key': 3, 'key': 4, 'health.fd_used': 1, 'health.fd_total': 2, 'health.mem_used': 2, 'health.mem_limit': 
4, 'health.sockets_used': 1, 'health.sockets_total': 2, 'health.disk_free_limit': 1, 'health.disk_free': 1, 'health.proc_used': 1, 'health.proc_total': 1, 'cluster.partitions': 0, 'cluster.nodes': 3 } self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path']) self.assertPublishedMany(publish_mock, metrics) @patch('rabbitmq.RabbitMQClient') @patch.object(Collector, 'publish') def test_opt_should_replace_dots(self, publish_mock, client_mock): self.collector.config['replace_dot'] = '_' client = Mock() queue_data = [{ 'more_keys': {'nested_key': 1}, 'key': 2, 'string': 'str', 'name': 'test.queue' }, { 'name': 'ignored', 'more_keys': {'nested_key': 1}, 'key': 2, 'string': 'str', }] overview_data = { 'node': 'rabbit@localhost', 'more_keys': {'nested_key': 3}, 'key': 4, 'string': 'string', } node_health = { 'fd_used': 1, 'fd_total': 2, 'mem_used': 2, 'mem_limit': 4, 'sockets_used': 1, 'sockets_total': 2, 'disk_free_limit': 1, 'disk_free': 1, 'proc_used': 1, 'proc_total': 1, 'partitions': [], } client_mock.return_value = client client.get_queues.return_value = queue_data client.get_overview.return_value = overview_data client.get_nodes.return_value = [1, 2, 3] client.get_node.return_value = node_health self.collector.collect() metrics = { 'queues.test_queue.more_keys.nested_key': 1, 'queues.test_queue.key': 2, 'more_keys.nested_key': 3, 'key': 4, 'health.fd_used': 1, 'health.fd_total': 2, 'health.mem_used': 2, 'health.mem_limit': 4, 'health.sockets_used': 1, 'health.sockets_total': 2, 'health.disk_free_limit': 1, 'health.disk_free': 1, 'health.proc_used': 1, 'health.proc_total': 1, 'cluster.partitions': 0, 'cluster.nodes': 3 } self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path']) self.assertPublishedMany(publish_mock, metrics) self.collector.config['replace_dot'] = False @patch('rabbitmq.RabbitMQClient') @patch.object(Collector, 'publish') 
def test_opt_should_replace_slashes(self, publish_mock, client_mock):
    """With ``replace_slash`` configured, '/' in queue names must be
    replaced by '_' in the published metric paths (``test/queue`` ->
    ``queues.test_queue.*``).

    ``publish_mock``/``client_mock`` are injected by the ``@patch``
    decorators on this TestCase method.
    """
    # Enable the option under test; restored to False at the end so the
    # shared collector fixture is not polluted for other tests.
    self.collector.config['replace_slash'] = '_'
    client = Mock()
    # Two queues: one whose name contains a slash, and one named
    # 'ignored' that does not appear in the expected metrics below
    # (presumably filtered out by the collector's queue configuration --
    # TODO confirm against the collector fixture).
    queue_data = [{
        'more_keys': {'nested_key': 1},
        'key': 2,
        'string': 'str',
        'name': 'test/queue'
    }, {
        'name': 'ignored',
        'more_keys': {'nested_key': 1},
        'key': 2,
        'string': 'str',
    }]
    overview_data = {
        'node': 'rabbit@localhost',
        'more_keys': {'nested_key': 3},
        'key': 4,
        'string': 'string',
    }
    # Per-node health payload as returned by the RabbitMQ management API.
    node_health = {
        'fd_used': 1,
        'fd_total': 2,
        'mem_used': 2,
        'mem_limit': 4,
        'sockets_used': 1,
        'sockets_total': 2,
        'disk_free_limit': 1,
        'disk_free': 1,
        'proc_used': 1,
        'proc_total': 1,
        'partitions': [],
    }
    # Wire the mocked RabbitMQClient to serve the canned API responses.
    client_mock.return_value = client
    client.get_queues.return_value = queue_data
    client.get_overview.return_value = overview_data
    client.get_nodes.return_value = [1, 2, 3]
    client.get_node.return_value = node_health
    self.collector.collect()
    # Note 'queues.test_queue.*': the slash was replaced, string-valued
    # fields were dropped, and nested dicts were flattened with dots.
    metrics = {
        'queues.test_queue.more_keys.nested_key': 1,
        'queues.test_queue.key': 2,
        'more_keys.nested_key': 3,
        'key': 4,
        'health.fd_used': 1,
        'health.fd_total': 2,
        'health.mem_used': 2,
        'health.mem_limit': 4,
        'health.sockets_used': 1,
        'health.sockets_total': 2,
        'health.disk_free_limit': 1,
        'health.disk_free': 1,
        'health.proc_used': 1,
        'health.proc_total': 1,
        'cluster.partitions': 0,
        'cluster.nodes': 3
    }
    self.setDocExample(collector=self.collector.__class__.__name__,
                       metrics=metrics,
                       defaultpath=self.collector.config['path'])
    self.assertPublishedMany(publish_mock, metrics)
    # Restore the default so later tests see an unmodified config.
    self.collector.config['replace_slash'] = False

##########################################################################
if __name__ == "__main__":
    unittest.main()
mit
kthordarson/youtube-dl-ruv
youtube_dl/extractor/sharesix.py
5
2667
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( compat_urllib_parse, compat_urllib_request, parse_duration, ) class ShareSixIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?sharesix\.com/(?:f/)?(?P<id>[0-9a-zA-Z]+)' _TESTS = [ { 'url': 'http://sharesix.com/f/OXjQ7Y6', 'md5': '9e8e95d8823942815a7d7c773110cc93', 'info_dict': { 'id': 'OXjQ7Y6', 'ext': 'mp4', 'title': 'big_buck_bunny_480p_surround-fix.avi', 'duration': 596, 'width': 854, 'height': 480, }, }, { 'url': 'http://sharesix.com/lfrwoxp35zdd', 'md5': 'dd19f1435b7cec2d7912c64beeee8185', 'info_dict': { 'id': 'lfrwoxp35zdd', 'ext': 'flv', 'title': 'WhiteBoard___a_Mac_vs_PC_Parody_Cartoon.mp4.flv', 'duration': 65, 'width': 1280, 'height': 720, }, } ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') fields = { 'method_free': 'Free' } post = compat_urllib_parse.urlencode(fields) req = compat_urllib_request.Request(url, post) req.add_header('Content-type', 'application/x-www-form-urlencoded') webpage = self._download_webpage(req, video_id, 'Downloading video page') video_url = self._search_regex( r"var\slnk1\s=\s'([^']+)'", webpage, 'video URL') title = self._html_search_regex( r'(?s)<dt>Filename:</dt>.+?<dd>(.+?)</dd>', webpage, 'title') duration = parse_duration( self._search_regex( r'(?s)<dt>Length:</dt>.+?<dd>(.+?)</dd>', webpage, 'duration', fatal=False ) ) m = re.search( r'''(?xs)<dt>Width\sx\sHeight</dt>.+? <dd>(?P<width>\d+)\sx\s(?P<height>\d+)</dd>''', webpage ) width = height = None if m: width, height = int(m.group('width')), int(m.group('height')) formats = [{ 'format_id': 'sd', 'url': video_url, 'width': width, 'height': height, }] return { 'id': video_id, 'title': title, 'duration': duration, 'formats': formats, }
unlicense
alexholehouse/SBMLIntegrator
libsbml-5.0.0/src/bindings/python/test/math/TestReadMathML.py
1
31941
# # @file TestReadMathML.py # @brief Read MathML unit tests # # @author Akiya Jouraku (Python conversion) # @author Ben Bornstein # # $Id: TestReadMathML.py 11441 2010-07-09 02:22:23Z mhucka $ # $HeadURL: https://sbml.svn.sourceforge.net/svnroot/sbml/trunk/libsbml/src/bindings/python/test/math/TestReadMathML.py $ # # ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ====== # # DO NOT EDIT THIS FILE. # # This file was generated automatically by converting the file located at # src/math/test/TestReadMathML.cpp # using the conversion program dev/utilities/translateTests/translateTests.pl. # Any changes made here will be lost the next time the file is regenerated. # # ----------------------------------------------------------------------------- # This file is part of libSBML. Please visit http://sbml.org for more # information about SBML, and the latest version of libSBML. # # Copyright 2005-2010 California Institute of Technology. # Copyright 2002-2005 California Institute of Technology and # Japan Science and Technology Corporation. # # This library is free software; you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation. 
A copy of the license agreement is provided # in the file named "LICENSE.txt" included with this software distribution # and also available online as http://sbml.org/software/libsbml/license.html # ----------------------------------------------------------------------------- import sys import unittest import libsbml def util_isInf(*x): return ( (x[0] == util_PosInf()) or (x[0] == util_NegInf()) ) def util_NaN(): z = 1e300 z = z * z return z - z def util_PosInf(): z = 1e300 z = z * z return z def util_NegInf(): z = 1e300 z = z * z return -z def wrapString(s): return s pass def MATHML_FOOTER(): return "</math>" pass def MATHML_HEADER(): return "<math xmlns='http://www.w3.org/1998/Math/MathML'>\n" pass def MATHML_HEADER_UNITS(): return "<math xmlns='http://www.w3.org/1998/Math/MathML'\n" pass def MATHML_HEADER_UNITS2(): return " xmlns:sbml='http://www.sbml.org/sbml/level3/version1/core'>\n" pass def XML_HEADER(): return "<?xml version='1.0' encoding='UTF-8'?>\n" pass def isnan(x): return (x != x) pass def wrapMathML(s): r = XML_HEADER() r += MATHML_HEADER() r += s r += MATHML_FOOTER() return r pass def wrapMathMLUnits(s): r = XML_HEADER() r += MATHML_HEADER_UNITS() r += MATHML_HEADER_UNITS2() r += s r += MATHML_FOOTER() return r pass def wrapXML(s): r = XML_HEADER() r += s return r pass class TestReadMathML(unittest.TestCase): global F F = None global N N = None def setUp(self): self.N = None self.F = None pass def tearDown(self): self.N = None self.F = None pass def test_element_abs(self): s = wrapMathML("<apply><abs/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "abs(x)" == self.F )) pass def test_element_and(self): s = wrapMathML("<apply> <and/> <ci>a</ci> <ci>b</ci> <ci>c</ci> </apply>" ) self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "and(a, b, c)" == self.F )) pass def 
test_element_arccos(self): s = wrapMathML("<apply><arccos/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "acos(x)" == self.F )) pass def test_element_arccosh(self): s = wrapMathML("<apply><arccosh/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "arccosh(x)" == self.F )) pass def test_element_arccot(self): s = wrapMathML("<apply><arccot/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "arccot(x)" == self.F )) pass def test_element_arccoth(self): s = wrapMathML("<apply><arccoth/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "arccoth(x)" == self.F )) pass def test_element_arccsc(self): s = wrapMathML("<apply><arccsc/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "arccsc(x)" == self.F )) pass def test_element_arccsch(self): s = wrapMathML("<apply><arccsch/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "arccsch(x)" == self.F )) pass def test_element_arcsec(self): s = wrapMathML("<apply><arcsec/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "arcsec(x)" == self.F )) pass def test_element_arcsech(self): s = wrapMathML("<apply><arcsech/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "arcsech(x)" == self.F )) pass def test_element_arcsin(self): s = 
wrapMathML("<apply><arcsin/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "asin(x)" == self.F )) pass def test_element_arcsinh(self): s = wrapMathML("<apply><arcsinh/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "arcsinh(x)" == self.F )) pass def test_element_arctan(self): s = wrapMathML("<apply><arctan/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "atan(x)" == self.F )) pass def test_element_arctanh(self): s = wrapMathML("<apply><arctanh/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "arctanh(x)" == self.F )) pass def test_element_bug_apply_ci_1(self): s = wrapMathML("<apply>" + " <ci> Y </ci>" + " <cn> 1 </cn>" + "</apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_FUNCTION ) self.assert_(( "Y" == self.N.getName() )) self.assert_( self.N.getNumChildren() == 1 ) c = self.N.getLeftChild() self.assert_( c != None ) self.assert_( c.getType() == libsbml.AST_REAL ) self.assert_( c.getReal() == 1 ) self.assert_( c.getNumChildren() == 0 ) pass def test_element_bug_apply_ci_2(self): s = wrapMathML("<apply>" + " <ci> Y </ci>" + " <csymbol encoding='text' " + " definitionURL='http://www.sbml.org/sbml/symbols/time'> t </csymbol>" + "</apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_FUNCTION ) self.assert_(( "Y" == self.N.getName() )) self.assert_( self.N.getNumChildren() == 1 ) c = self.N.getLeftChild() self.assert_( c != None ) self.assert_( c.getType() == libsbml.AST_NAME_TIME ) self.assert_(( "t" == 
c.getName() )) self.assert_( c.getNumChildren() == 0 ) pass def test_element_bug_cn_e_notation_1(self): s = wrapMathML("<cn type='e-notation'> 2 <sep/> -8 </cn>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_REAL_E ) self.assert_( self.N.getMantissa() == 2.0 ) self.assert_( self.N.getExponent() == -8.0 ) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_bug_cn_e_notation_2(self): s = wrapMathML("<cn type='e-notation'> -3 <sep/> 4 </cn>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_REAL_E ) self.assert_( self.N.getMantissa() == -3.0 ) self.assert_( self.N.getExponent() == 4.0 ) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_bug_cn_e_notation_3(self): s = wrapMathML("<cn type='e-notation'> -6 <sep/> -1 </cn>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_REAL_E ) self.assert_( self.N.getMantissa() == -6.0 ) self.assert_( self.N.getExponent() == -1.0 ) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_bug_cn_integer_negative(self): s = wrapMathML("<cn type='integer'> -7 </cn>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_INTEGER ) self.assert_( self.N.getInteger() == -7 ) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_bug_csymbol_1(self): s = wrapMathML("<apply>" + " <gt/>" + " <csymbol encoding='text' " + " definitionURL='http://www.sbml.org/sbml/symbols/time'>time</csymbol>" + " <cn>5000</cn>" + "</apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_RELATIONAL_GT ) self.assert_( self.N.getNumChildren() == 2 ) c = self.N.getLeftChild() self.assert_( c != None ) self.assert_( c.getType() == libsbml.AST_NAME_TIME ) self.assert_(( 
"time" == c.getName() )) self.assert_( c.getNumChildren() == 0 ) c = self.N.getRightChild() self.assert_( c != None ) self.assert_( c.getType() == libsbml.AST_REAL ) self.assert_( c.getReal() == 5000 ) self.assert_( c.getNumChildren() == 0 ) pass def test_element_bug_csymbol_delay_1(self): s = wrapMathML("<apply>" + " <csymbol encoding='text' definitionURL='http://www.sbml.org/sbml/" + "symbols/delay'> my_delay </csymbol>" + " <ci> x </ci>" + " <cn> 0.1 </cn>" + "</apply>\n") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_FUNCTION_DELAY ) self.assert_(( "my_delay" == self.N.getName() )) self.assert_( self.N.getNumChildren() == 2 ) c = self.N.getLeftChild() self.assert_( c != None ) self.assert_( c.getType() == libsbml.AST_NAME ) self.assert_(( "x" == c.getName() )) self.assert_( c.getNumChildren() == 0 ) c = self.N.getRightChild() self.assert_( c != None ) self.assert_( c.getType() == libsbml.AST_REAL ) self.assert_( c.getReal() == 0.1 ) self.assert_( c.getNumChildren() == 0 ) pass def test_element_bug_math_xmlns(self): s = wrapXML("<foo:math xmlns:foo='http://www.w3.org/1998/Math/MathML'>" + " <foo:apply>" + " <foo:plus/> <foo:cn>1</foo:cn> <foo:cn>2</foo:cn>" + " </foo:apply>" + "</foo:math>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "1 + 2" == self.F )) pass def test_element_ceiling(self): s = wrapMathML("<apply><ceiling/><cn> 1.6 </cn></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "ceil(1.6)" == self.F )) pass def test_element_ci(self): s = wrapMathML("<ci> x </ci>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_NAME ) self.assert_(( "x" == self.N.getName() )) self.assert_( self.N.getNumChildren() == 0 ) pass def 
test_element_ci_definitionURL(self): s = wrapMathML("<ci definitionURL=\"foobar\"> x </ci>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_NAME ) self.assert_(( "x" == self.N.getName() )) self.assert_( self.N.getNumChildren() == 0 ) self.assert_( self.N.getDefinitionURL().getValue(0) == "foobar" ) pass def test_element_ci_surrounding_spaces_bug(self): s = wrapMathML(" <ci> s </ci> ") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_NAME ) self.assert_(( "s" == self.N.getName() )) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_cn_default(self): s = wrapMathML("<cn> 12345.7 </cn>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_REAL ) self.assert_( self.N.getReal() == 12345.7 ) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_cn_e_notation(self): s = wrapMathML("<cn type='e-notation'> 12.3 <sep/> 5 </cn>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_REAL_E ) self.assert_( self.N.getMantissa() == 12.3 ) self.assert_( self.N.getExponent() == 5 ) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_cn_integer(self): s = wrapMathML("<cn type='integer'> 12345 </cn>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_INTEGER ) self.assert_( self.N.getInteger() == 12345 ) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_cn_rational(self): s = wrapMathML("<cn type='rational'> 12342 <sep/> 2342342 </cn>" ) self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_RATIONAL ) self.assert_( self.N.getNumerator() == 12342 ) self.assert_( self.N.getDenominator() == 2342342 ) self.assert_( 
self.N.getNumChildren() == 0 ) pass def test_element_cn_real(self): s = wrapMathML("<cn type='real'> 12345.7 </cn>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_REAL ) self.assert_( self.N.getReal() == 12345.7 ) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_cn_units(self): s = wrapMathMLUnits("<cn sbml:units=\"mole\"> 12345.7 </cn>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_REAL ) self.assert_( self.N.getReal() == 12345.7 ) self.assert_( self.N.getUnits() == "mole" ) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_constants_exponentiale(self): s = wrapMathML("<exponentiale/>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_CONSTANT_E ) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_constants_false(self): s = wrapMathML("<false/>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_CONSTANT_FALSE ) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_constants_infinity(self): s = wrapMathML("<infinity/>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_REAL ) self.assert_( util_isInf(self.N.getReal()) == True ) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_constants_notanumber(self): s = wrapMathML("<notanumber/>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_REAL ) self.assertEqual( True, isnan(self.N.getReal()) ) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_constants_pi(self): s = wrapMathML("<pi/>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == 
libsbml.AST_CONSTANT_PI ) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_constants_true(self): s = wrapMathML("<true/>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_CONSTANT_TRUE ) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_cos(self): s = wrapMathML("<apply><cos/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "cos(x)" == self.F )) pass def test_element_cosh(self): s = wrapMathML("<apply><cosh/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "cosh(x)" == self.F )) pass def test_element_cot(self): s = wrapMathML("<apply><cot/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "cot(x)" == self.F )) pass def test_element_coth(self): s = wrapMathML("<apply><coth/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "coth(x)" == self.F )) pass def test_element_csc(self): s = wrapMathML("<apply><csc/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "csc(x)" == self.F )) pass def test_element_csch(self): s = wrapMathML("<apply><csch/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "csch(x)" == self.F )) pass def test_element_csymbol_avogadro(self): s = wrapMathML("<csymbol encoding='text' " + "definitionURL='http://www.sbml.org/sbml/symbols/avogadro'> NA </csymbol>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( 
self.N.getType() == libsbml.AST_NAME_AVOGADRO ) self.assert_(( "NA" == self.N.getName() )) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_csymbol_delay_1(self): s = wrapMathML("<csymbol encoding='text' " + "definitionURL='http://www.sbml.org/sbml/symbols/delay'> delay </csymbol>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_FUNCTION_DELAY ) self.assert_(( "delay" == self.N.getName() )) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_csymbol_delay_2(self): s = wrapMathML("<apply>" + " <csymbol encoding='text' definitionURL='http://www.sbml.org/sbml/" + "symbols/delay'> my_delay </csymbol>" + " <ci> x </ci>" + " <cn> 0.1 </cn>" + "</apply>\n") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "my_delay(x, 0.1)" == self.F )) pass def test_element_csymbol_delay_3(self): s = wrapMathML("<apply>" + " <power/>" + " <apply>" + " <csymbol encoding='text' definitionURL='http://www.sbml.org/sbml/" + "symbols/delay'> delay </csymbol>" + " <ci> P </ci>" + " <ci> delta_t </ci>" + " </apply>\n" + " <ci> q </ci>" + "</apply>\n") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "pow(delay(P, delta_t), q)" == self.F )) pass def test_element_csymbol_time(self): s = wrapMathML("<csymbol encoding='text' " + "definitionURL='http://www.sbml.org/sbml/symbols/time'> t </csymbol>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_NAME_TIME ) self.assert_(( "t" == self.N.getName() )) self.assert_( self.N.getNumChildren() == 0 ) pass def test_element_eq(self): s = wrapMathML("<apply> <eq/> <ci>a</ci> <ci>b</ci> <ci>c</ci> </apply>" ) self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( 
"eq(a, b, c)" == self.F )) pass def test_element_exp(self): s = wrapMathML("<apply><exp/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "exp(x)" == self.F )) pass def test_element_factorial(self): s = wrapMathML("<apply><factorial/><cn> 5 </cn></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "factorial(5)" == self.F )) pass def test_element_floor(self): s = wrapMathML("<apply><floor/><cn> 1.2 </cn></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "floor(1.2)" == self.F )) pass def test_element_function_call_1(self): s = wrapMathML("<apply> <ci> foo </ci> <ci> x </ci> </apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "foo(x)" == self.F )) pass def test_element_function_call_2(self): s = wrapMathML("<apply> <plus/> <cn> 1 </cn>" + " <apply> <ci> f </ci> <ci> x </ci> </apply>" + "</apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "1 + f(x)" == self.F )) pass def test_element_geq(self): s = wrapMathML("<apply> <geq/> <cn>1</cn> <ci>x</ci> <cn>0</cn> </apply>" ) self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "geq(1, x, 0)" == self.F )) pass def test_element_gt(self): s = wrapMathML("<apply> <gt/> <infinity/>" + " <apply> <minus/> <infinity/> <cn>1</cn> </apply>" + "</apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "gt(INF, INF - 1)" == self.F )) pass def test_element_invalid_mathml(self): invalid = wrapMathML("<lambda 
definitionURL=\"http://biomodels.net/SBO/#SBO:0000065\">" + "<bvar>" + "<ci>c</ci>" + "</bvar>" + "<apply>" + " <ci>c</ci>" + "</apply>" + "</lambda>\n") self.N = libsbml.readMathMLFromString(None) self.assert_( self.N == None ) self.N = libsbml.readMathMLFromString(invalid) self.assert_( self.N == None ) pass def test_element_lambda(self): s = wrapMathML("<lambda>" + " <bvar> <ci>x</ci> </bvar>" + " <apply> <sin/>" + " <apply> <plus/> <ci>x</ci> <cn>1</cn> </apply>" + " </apply>" + "</lambda>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "lambda(x, sin(x + 1))" == self.F )) pass def test_element_leq(self): s = wrapMathML("<apply> <leq/> <cn>0</cn> <ci>x</ci> <cn>1</cn> </apply>" ) self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "leq(0, x, 1)" == self.F )) pass def test_element_ln(self): s = wrapMathML("<apply><ln/><ci> a </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "log(a)" == self.F )) pass def test_element_log_1(self): s = wrapMathML("<apply> <log/> <logbase> <cn type='integer'> 3 </cn> </logbase>" + " <ci> x </ci>" + "</apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "log(3, x)" == self.F )) pass def test_element_log_2(self): s = wrapMathML("<apply> <log/> <ci> x </ci> </apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "log10(x)" == self.F )) pass def test_element_lt(self): s = wrapMathML("<apply> <lt/> <apply> <minus/> <infinity/> <infinity/> </apply>" + " <cn>1</cn>" + "</apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "lt(INF - INF, 1)" == 
self.F )) pass def test_element_math(self): s = wrapXML("<math xmlns='http://www.w3.org/1998/Math/MathML'/>" ) self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.assert_( self.N.getType() == libsbml.AST_UNKNOWN ) pass def test_element_neq(self): s = wrapMathML("<apply> <neq/> <notanumber/> <notanumber/> </apply>" ) self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "neq(NaN, NaN)" == self.F )) pass def test_element_not(self): s = wrapMathML("<apply> <not/> <ci> TooShabby </ci> </apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "not(TooShabby)" == self.F )) pass def test_element_operator_plus(self): s = wrapMathML("<apply> <plus/> <cn> 1 </cn> <cn> 2 </cn> <cn> 3 </cn> </apply>" ) self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "1 + 2 + 3" == self.F )) pass def test_element_operator_times(self): s = wrapMathML("<apply> <times/> <ci> x </ci> <ci> y </ci> <ci> z </ci> </apply>" ) self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "x * y * z" == self.F )) pass def test_element_or(self): s = wrapMathML("<apply> <or/> <ci>a</ci> <ci>b</ci> <ci>c</ci> <ci>d</ci> </apply>" ) self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "or(a, b, c, d)" == self.F )) pass def test_element_piecewise(self): s = wrapMathML("<piecewise>" + " <piece>" + " <apply> <minus/> <ci>x</ci> </apply>" + " <apply> <lt/> <ci>x</ci> <cn>0</cn> </apply>" + " </piece>" + " <piece>" + " <cn>0</cn>" + " <apply> <eq/> <ci>x</ci> <cn>0</cn> </apply>" + " </piece>" + " <piece>" + " <ci>x</ci>" + " <apply> <gt/> <ci>x</ci> <cn>0</cn> </apply>" + " </piece>" + "</piecewise>") self.N = 
libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "piecewise(-x, lt(x, 0), 0, eq(x, 0), x, gt(x, 0))" == self.F )) pass def test_element_piecewise_otherwise(self): s = wrapMathML("<piecewise>" + " <piece>" + " <cn>0</cn>" + " <apply> <lt/> <ci>x</ci> <cn>0</cn> </apply>" + " </piece>" + " <otherwise>" + " <ci>x</ci>" + " </otherwise>" + "</piecewise>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "piecewise(0, lt(x, 0), x)" == self.F )) pass def test_element_power(self): s = wrapMathML("<apply><power/> <ci>x</ci> <cn>3</cn> </apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "pow(x, 3)" == self.F )) pass def test_element_root_1(self): s = wrapMathML("<apply> <root/> <degree> <cn type='integer'> 3 </cn> </degree>" + " <ci> a </ci>" + "</apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "root(3, a)" == self.F )) pass def test_element_root_2(self): s = wrapMathML("<apply> <root/> <ci> a </ci> </apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "sqrt(a)" == self.F )) pass def test_element_sec(self): s = wrapMathML("<apply><sec/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "sec(x)" == self.F )) pass def test_element_sech(self): s = wrapMathML("<apply><sech/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "sech(x)" == self.F )) pass def test_element_sin(self): s = wrapMathML("<apply><sin/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) 
self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "sin(x)" == self.F )) pass def test_element_sinh(self): s = wrapMathML("<apply><sinh/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "sinh(x)" == self.F )) pass def test_element_tan(self): s = wrapMathML("<apply><tan/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "tan(x)" == self.F )) pass def test_element_tanh(self): s = wrapMathML("<apply><tanh/><ci> x </ci></apply>") self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "tanh(x)" == self.F )) pass def test_element_xor(self): s = wrapMathML("<apply> <xor/> <ci>a</ci> <ci>b</ci> <ci>b</ci> <ci>a</ci> </apply>" ) self.N = libsbml.readMathMLFromString(s) self.assert_( self.N != None ) self.F = libsbml.formulaToString(self.N) self.assert_(( "xor(a, b, b, a)" == self.F )) pass def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(TestReadMathML)) return suite if __name__ == "__main__": if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() : sys.exit(0) else: sys.exit(1)
gpl-3.0
kdeloach/model-my-watershed
src/mmw/apps/monitoring/views.py
3
1740
# -*- coding: utf-8 -*- from __future__ import print_function from __future__ import unicode_literals from __future__ import division from rest_framework import status from django.http import JsonResponse from django.core.cache import caches from django.db import connections from mmw.middleware import bypass_middleware import uuid @bypass_middleware def health_check(request): response = {} for check in [_check_cache, _check_database]: response.update(check()) if all(map(lambda x: x[0]['default']['ok'], response.values())): return JsonResponse(response, status=status.HTTP_200_OK) else: return JsonResponse(response, status=status.HTTP_503_SERVICE_UNAVAILABLE) def _check_cache(cache='default'): key = 'health-check-{}'.format(uuid.uuid4()) try: caches[cache].set(key, uuid.uuid4()) # Loss of connectivity to Redis does not always throw an # exception because DJANGO_REDIS_IGNORE_EXCEPTIONS is True. if caches[cache].get(key) is None: response = {cache: {'ok': False}} else: response = {cache: {'ok': True}} caches[cache].delete(key) except Exception as e: response = { cache: { 'ok': False, 'msg': str(e) }, } return {'caches': [response]} def _check_database(database='default'): try: connections[database].introspection.table_names() response = {database: {'ok': True}} except Exception as e: response = { database: { 'ok': False, 'msg': str(e) }, } return {'databases': [response]}
apache-2.0
tarballs-are-good/sympy
sympy/physics/quantum/tests/test_dagger.py
2
1198
from sympy import I, Matrix, symbols, conjugate, Expr, Integer

from sympy.physics.quantum.dagger import Dagger


def test_scalars():
    """Dagger of scalars: conjugation for complex, identity for real."""
    # Dagger of a complex symbol is its complex conjugate.
    x = symbols('x',complex=True)
    assert Dagger(x) == conjugate(x)
    assert Dagger(I*x) == -I*conjugate(x)

    # Real symbols are self-adjoint.
    i = symbols('i',real=True)
    assert Dagger(i) == i

    # With no assumptions, Dagger stays unevaluated.
    p = symbols('p')
    assert isinstance(Dagger(p), Dagger)

    # Explicit Integers are self-adjoint.
    i = Integer(3)
    assert Dagger(i) == i


def test_matrix():
    """Dagger of a sympy Matrix is its Hermitian conjugate (.H)."""
    x = symbols('x')
    m = Matrix([[I,x*I],[2,4]])
    assert Dagger(m) == m.H


class Foo(Expr):
    # Minimal Expr subclass exposing the _eval_dagger hook, used to
    # confirm Dagger dispatches to it.

    def _eval_dagger(self):
        return I


def test_eval_dagger():
    """Dagger(obj) delegates to obj._eval_dagger() when defined."""
    f = Foo()
    d = Dagger(f)
    assert d == I


# The numpy/scipy tests are only defined when the libraries import
# cleanly; otherwise they are silently omitted from the suite.
try:
    import numpy as np
except ImportError:
    pass
else:
    def test_numpy_dagger():
        """Dagger of a numpy matrix equals its conjugate transpose."""
        a = np.matrix([[1.0,2.0j],[-1.0j,2.0]])
        adag = a.copy().transpose().conjugate()
        assert (Dagger(a) == adag).all()


try:
    from scipy import sparse
    import numpy as np
except ImportError:
    pass
else:
    def test_scipy_sparse_dagger():
        """Dagger of a scipy sparse matrix equals its conjugate transpose."""
        a = sparse.csr_matrix([[1.0+0.0j,2.0j],[-1.0j,2.0+0.0j]])
        adag = a.copy().transpose().conjugate()
        assert np.linalg.norm((Dagger(a) - adag).todense()) == 0.0
bsd-3-clause
natetrue/ReplicatorG
skein_engines/skeinforge-0006/skeinforge_tools/skeinforge_utilities/intercircle.py
2
22704
""" Intercircle is a collection of utilities for intersecting circles, used to get smooth loops around a collection of points and inset & outset loops. """ from __future__ import absolute_import try: import psyco psyco.full() except: pass #Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module. import __init__ from skeinforge_tools.skeinforge_utilities.vector3 import Vector3 from skeinforge_tools.skeinforge_utilities import euclidean import math __author__ = "Enrique Perez (perez_enrique@yahoo.com)" __date__ = "$Date: 2008/21/04 $" __license__ = "GPL 3.0" def addCircleIntersectionLoop( circleIntersectionPathComplexes, circleIntersections ): "Add a circle intersection loop." firstCircleIntersection = circleIntersectionPathComplexes[ 0 ] circleIntersectionAhead = firstCircleIntersection for circleIntersectionIndex in xrange( len( circleIntersections ) + 1 ): circleIntersectionAhead = circleIntersectionAhead.getCircleIntersectionAhead() if circleIntersectionAhead.index == firstCircleIntersection.index: firstCircleIntersection.steppedOn = True return if circleIntersectionAhead.steppedOn == True: print( 'circleIntersectionAhead.steppedOn == True in intercircle.' ) print( circleIntersectionAhead ) circleIntersectionAhead.addToList( circleIntersectionPathComplexes ) firstCircleIntersection.steppedOn = True print( "addCircleIntersectionLoop would have gone into an endless loop, this should never happen." 
) print( "circleIntersectionPathComplexes" ) for circleIntersectionComplex in circleIntersectionPathComplexes: print( circleIntersectionComplex ) print( circleIntersectionComplex.circleNodeAhead ) print( circleIntersectionComplex.circleNodeBehind ) print( "firstCircleIntersection" ) print( firstCircleIntersection ) print( "circleIntersections" ) for circleIntersectionComplex in circleIntersections: print( circleIntersectionComplex ) def addOperatingOrbits( boundaryLoops, pointComplex, skein, temperatureChangeTime, z ): "Add the orbits before the operating layers." if len( boundaryLoops ) < 1: return largestLength = - 999999999.0 largestLoop = None perimeterOutset = 0.4 * skein.extrusionPerimeterWidth greaterThanPerimeterOutset = 1.1 * perimeterOutset for boundaryLoop in boundaryLoops: centers = getCentersFromLoopDirection( True, boundaryLoop, greaterThanPerimeterOutset ) for center in centers: outset = getSimplifiedInsetFromClockwiseLoop( center, perimeterOutset ) if euclidean.isLargeSameDirection( outset, center, perimeterOutset ): loopLength = euclidean.getPolygonLength( outset ) if loopLength > largestLength: largestLength = loopLength largestLoop = outset if largestLoop == None: return if pointComplex != None: largestLoop = euclidean.getLoopStartingNearest( skein.extrusionPerimeterWidth, pointComplex, largestLoop ) addOrbits( largestLoop, skein, temperatureChangeTime, z ) def addOrbits( loop, skein, temperatureChangeTime, z ): "Add orbits with the extruder off." if len( loop ) < 1: print( 'Zero length loop which was skipped over, this should never happen.' 
) if temperatureChangeTime < 1.5: return timeInOrbit = 0.0 while timeInOrbit < temperatureChangeTime: for point in loop: skein.addGcodeFromFeedrateMovementZ( 60.0 * skein.orbitalFeedratePerSecond, point, z ) timeInOrbit += euclidean.getPolygonLength( loop ) / skein.orbitalFeedratePerSecond def addPointsFromSegment( pointComplexes, radius, pointBeginComplex, pointEndComplex, thresholdRatio = 0.9 ): "Add point complexes between the endpoints of a segment." if radius <= 0.0: print( 'This should never happen, radius should never be zero or less in addPointsFromSegment in intercircle.' ) thresholdRadius = radius * thresholdRatio # a higher number would be faster but would leave bigger dangling loops. thresholdDiameter = thresholdRadius * 2.0 segmentComplex = pointEndComplex - pointBeginComplex segmentComplexLength = abs( segmentComplex ) extraCircles = int( math.floor( segmentComplexLength / thresholdDiameter ) ) lengthIncrement = segmentComplexLength / ( float( extraCircles ) + 1.0 ) if segmentComplexLength == 0.0: print( 'This should never happen, segmentComplexLength = 0.0 in intercircle.' ) print( 'pointBeginComplex' ) print( pointBeginComplex ) print( pointEndComplex ) return segmentComplex *= lengthIncrement / segmentComplexLength nextCircleCenterComplex = pointBeginComplex + segmentComplex for circleIndex in xrange( extraCircles ): pointComplexes.append( nextCircleCenterComplex ) nextCircleCenterComplex += segmentComplex def getCentersFromCircleNodes( circleNodesComplex ): "Get the complex centers of the circle intersection loops from circle nodes." if len( circleNodesComplex ) < 2: return [] circleIntersections = getCircleIntersectionsFromCircleNodes( circleNodesComplex ) circleIntersectionLoopComplexes = getCircleIntersectionLoops( circleIntersections ) return getCentersFromIntersectionLoops( circleIntersectionLoopComplexes ) def getCentersFromIntersectionLoop( circleIntersectionLoopComplex ): "Get the centers from the intersection loop." 
loop = [] for circleIntersectionComplex in circleIntersectionLoopComplex: loop.append( circleIntersectionComplex.circleNodeAhead.circle ) return loop def getCentersFromIntersectionLoops( circleIntersectionLoopComplexes ): "Get the centers from the intersection loops." centers = [] for circleIntersectionLoopComplex in circleIntersectionLoopComplexes: centers.append( getCentersFromIntersectionLoop( circleIntersectionLoopComplex ) ) return centers def getCentersFromLoopDirection( isWiddershins, loop, radius ): "Get the centers of the circle intersection loops which go around in the given direction." circleNodes = getCircleNodesFromLoop( loop, radius ) centers = getCentersFromCircleNodes( circleNodes ) return getLoopsFromLoopsDirection( isWiddershins, centers ) def getCircleIntersectionsFromCircleNodes( circleNodesComplex ): "Get all the circle intersections which exist between all the circle nodes." if len( circleNodesComplex ) < 1: return circleIntersections = [] index = 0 pixelTable = {} slightlyGreaterThanRadius = 1.01 * circleNodesComplex[ 0 ].radius for circleNode in circleNodesComplex: circleOverWidth = circleNode.circle / slightlyGreaterThanRadius x = int( round( circleOverWidth.real ) ) y = int( round( circleOverWidth.imag ) ) euclidean.addElementToPixelList( circleNode, pixelTable, x, y ) slightlyGreaterThanDiameter = slightlyGreaterThanRadius + slightlyGreaterThanRadius accumulatedCircleNodeTable = {} for circleNodeIndex in xrange( len( circleNodesComplex ) ): circleNodeBehind = circleNodesComplex[ circleNodeIndex ] circleNodeIndexMinusOne = circleNodeIndex - 1 if circleNodeIndexMinusOne >= 0: circleNodeAdditional = circleNodesComplex[ circleNodeIndexMinusOne ] circleOverSlightlyGreaterThanDiameter = circleNodeAdditional.circle / slightlyGreaterThanDiameter x = int( round( circleOverSlightlyGreaterThanDiameter.real ) ) y = int( round( circleOverSlightlyGreaterThanDiameter.imag ) ) euclidean.addElementToPixelList( circleNodeAdditional, 
accumulatedCircleNodeTable, x, y ) withinNodes = circleNodeBehind.getWithinNodes( accumulatedCircleNodeTable, slightlyGreaterThanDiameter ) for circleNodeAhead in withinNodes: circleIntersectionForward = CircleIntersection( circleNodeAhead, index, circleNodeBehind ) if not circleIntersectionForward.isWithinCircles( pixelTable, slightlyGreaterThanRadius ): circleIntersections.append( circleIntersectionForward ) circleNodeBehind.circleIntersections.append( circleIntersectionForward ) index += 1 circleIntersectionBackward = CircleIntersection( circleNodeBehind, index, circleNodeAhead ) if not circleIntersectionBackward.isWithinCircles( pixelTable, slightlyGreaterThanRadius ): circleIntersections.append( circleIntersectionBackward ) circleNodeAhead.circleIntersections.append( circleIntersectionBackward ) index += 1 return circleIntersections def getCircleIntersectionLoops( circleIntersections ): "Get all the loops going through the circle intersections." circleIntersectionLoopComplexes = [] for circleIntersectionComplex in circleIntersections: if not circleIntersectionComplex.steppedOn: circleIntersectionLoopComplex = [ circleIntersectionComplex ] circleIntersectionLoopComplexes.append( circleIntersectionLoopComplex ) addCircleIntersectionLoop( circleIntersectionLoopComplex, circleIntersections ) return circleIntersectionLoopComplexes def getCircleNodesFromLoop( loop, radius ): "Get the circle nodes from every point on a loop and between points." radius = abs( radius ) pointComplexes = [] for pointComplexIndex in xrange( len( loop ) ): pointComplex = loop[ pointComplexIndex ] pointComplexSecond = loop[ ( pointComplexIndex + 1 ) % len( loop ) ] pointComplexes.append( pointComplex ) addPointsFromSegment( pointComplexes, radius, pointComplex, pointComplexSecond ) return getCircleNodesFromPoints( pointComplexes, radius ) def getCircleNodesFromPoints( pointComplexes, radius ): "Get the circle nodes from a path." 
circleNodesComplex = [] pointComplexes = euclidean.getAwayPoints( pointComplexes, 0.001 * radius ) for pointComplex in pointComplexes: circleNodesComplex.append( CircleNode( pointComplex, len( circleNodesComplex ), radius ) ) return circleNodesComplex def getInsetFromClockwiseTriple( aheadAbsoluteComplex, behindAbsoluteComplex, centerComplex, radius ): "Get loop inset from clockwise triple, out from widdershins loop." originalCenterMinusBehindComplex = euclidean.getNormalized( centerComplex - behindAbsoluteComplex ) reverseRoundZAngle = complex( originalCenterMinusBehindComplex.real, - originalCenterMinusBehindComplex.imag ) aheadAbsoluteComplex *= reverseRoundZAngle behindAbsoluteComplex *= reverseRoundZAngle centerComplex *= reverseRoundZAngle aheadIntersectionComplex = getIntersectionAtInset( aheadAbsoluteComplex, centerComplex, radius ) behindIntersectionComplex = getIntersectionAtInset( centerComplex, behindAbsoluteComplex, radius ) centerComplexMinusAhead = centerComplex - aheadAbsoluteComplex if abs( centerComplexMinusAhead.imag ) < abs( 0.000001 * centerComplexMinusAhead.real ): between = 0.5 * ( aheadIntersectionComplex + behindIntersectionComplex ) return originalCenterMinusBehindComplex * between yMinusAhead = behindIntersectionComplex.imag - aheadIntersectionComplex.imag x = aheadIntersectionComplex.real + yMinusAhead * centerComplexMinusAhead.real / centerComplexMinusAhead.imag return originalCenterMinusBehindComplex * complex( x, behindIntersectionComplex.imag ) def getInsetFromClockwiseLoop( loop, radius ): "Get loop inset from clockwise loop, out from widdershins loop." 
insetLoopComplex = [] for pointComplexIndex in xrange( len( loop ) ): behindAbsoluteComplex = loop[ ( pointComplexIndex + len( loop ) - 1 ) % len( loop ) ] centerComplex = loop[ pointComplexIndex ] aheadAbsoluteComplex = loop[ ( pointComplexIndex + 1 ) % len( loop ) ] insetLoopComplex.append( getInsetFromClockwiseTriple( aheadAbsoluteComplex, behindAbsoluteComplex, centerComplex, radius ) ) return insetLoopComplex def getInsetLoops( inset, loops ): "Get the inset loops." insetLoops = [] for loop in loops: insetLoops += getInsetLoopsFromLoop( inset, loop ) return insetLoops def getInsetLoopsFromLoop( inset, loop ): "Get the inset loops from a loop." absoluteInset = abs( inset ) insetLoops = [] slightlyGreaterThanInset = 1.1 * absoluteInset muchGreaterThanLayerInset = 2.5 * absoluteInset isInInsetDirection = euclidean.isWiddershins( loop ) if inset < 0.0: isInInsetDirection = not isInInsetDirection centers = getCentersFromLoopDirection( not isInInsetDirection, loop, slightlyGreaterThanInset ) for center in centers: insetLoop = getSimplifiedInsetFromClockwiseLoop( center, absoluteInset ) if euclidean.isLargeSameDirection( insetLoop, center, muchGreaterThanLayerInset ): if euclidean.isPathInsideLoop( loop, insetLoop ) == isInInsetDirection: if inset > 0.0: insetLoop.reverse() insetLoops.append( insetLoop ) return insetLoops def getIntersectionAtInset( aheadComplex, behindComplex, inset ): "Get circle intersection loop at inset from segment." aheadComplexMinusBehindComplex = 0.5 * ( aheadComplex - behindComplex ) rotatedClockwiseQuarter = complex( aheadComplexMinusBehindComplex.imag, - aheadComplexMinusBehindComplex.real ) rotatedClockwiseQuarter *= inset / abs( rotatedClockwiseQuarter ) return aheadComplexMinusBehindComplex + behindComplex + rotatedClockwiseQuarter def getLoopsFromLoopsDirection( isWiddershins, loops ): "Get the loops going round in a given direction." 
directionalLoopComplexes = [] for loop in loops: if euclidean.isWiddershins( loop ) == isWiddershins: directionalLoopComplexes.append( loop ) return directionalLoopComplexes def getSimplifiedInsetFromClockwiseLoop( loop, radius ): "Get loop inset from clockwise loop, out from widdershins loop." return getWithoutIntersections( euclidean.getSimplifiedLoop( getInsetFromClockwiseLoop( loop, radius ), radius ) ) def getWithoutIntersections( loop ): "Get loop without intersections." lastLoopLength = len( loop ) while lastLoopLength > 3: removeIntersection( loop ) if len( loop ) == lastLoopLength: return loop lastLoopLength = len( loop ) return loop def isLoopIntersectingLoop( anotherLoop, loop ): "Determine if the a loop is intersecting another loop." for pointIndex in xrange( len( loop ) ): pointFirst = loop[ pointIndex ] pointSecond = loop[ ( pointIndex + 1 ) % len( loop ) ] segment = pointFirst - pointSecond normalizedSegment = euclidean.getNormalized( segment ) segmentYMirror = complex( normalizedSegment.real, - normalizedSegment.imag ) segmentFirstPoint = segmentYMirror * pointFirst segmentSecondPoint = segmentYMirror * pointSecond if euclidean.isLoopIntersectingInsideXSegment( anotherLoop, segmentFirstPoint.real, segmentSecondPoint.real, segmentYMirror, segmentFirstPoint.imag ): return True return False def removeIntersection( loop ): "Get loop without the first intersection." 
withoutIntersection = [] for pointIndex in xrange( len( loop ) ): behindComplex = loop[ ( pointIndex + len( loop ) - 1 ) % len( loop ) ] behindEndComplex = loop[ ( pointIndex + len( loop ) - 2 ) % len( loop ) ] behindMidpointComplex = 0.5 * ( behindComplex + behindEndComplex ) aheadComplex = loop[ pointIndex ] aheadEndComplex = loop[ ( pointIndex + 1 ) % len( loop ) ] aheadMidpointComplex = 0.5 * ( aheadComplex + aheadEndComplex ) normalizedSegment = behindComplex - behindMidpointComplex normalizedSegmentLength = abs( normalizedSegment ) if normalizedSegmentLength > 0.0: normalizedSegment /= normalizedSegmentLength segmentYMirror = complex( normalizedSegment.real, - normalizedSegment.imag ) behindRotated = segmentYMirror * behindComplex behindMidpointRotated = segmentYMirror * behindMidpointComplex aheadRotated = segmentYMirror * aheadComplex aheadMidpointRotated = segmentYMirror * aheadMidpointComplex y = behindRotated.imag isYAboveFirst = y > aheadRotated.imag isYAboveSecond = y > aheadMidpointRotated.imag if isYAboveFirst != isYAboveSecond: xIntersection = euclidean.getXIntersection( aheadRotated, aheadMidpointRotated, y ) if xIntersection > min( behindMidpointRotated.real, behindRotated.real ) and xIntersection < max( behindMidpointRotated.real, behindRotated.real ): intersectionPoint = normalizedSegment * complex( xIntersection, y ) loop[ ( pointIndex + len( loop ) - 1 ) % len( loop ) ] = intersectionPoint del loop[ pointIndex ] return class BoundingLoop: "A class to hold a bounding loop composed of a minimum complex, a maximum complex and an outset loop." def __cmp__( self, other ): "Get comparison in order to sort bounding loops in descending order of area." if self.area < other.area: return 1 if self.area > other.area: return - 1 return 0 def __repr__( self ): "Get the string representation of this bounding loop." return '%s, %s, %s' % ( self.minimum, self.maximum, self.loop ) def getFromLoop( self, loop ): "Get the bounding loop from a path." 
self.loop = loop self.maximum = euclidean.getMaximumFromPoints( loop ) self.minimum = euclidean.getMaximumFromPoints( loop ) return self def getOutsetBoundingLoop( self, outsetDistance ): "Outset the bounding rectangle and loop by a distance." outsetBoundingLoop = BoundingLoop() outsetBoundingLoop.maximum = self.maximum + complex( outsetDistance, outsetDistance ) outsetBoundingLoop.minimum = self.minimum - complex( outsetDistance, outsetDistance ) greaterThanOutsetDistance = 1.1 * outsetDistance centers = getCentersFromLoopDirection( True, self.loop, greaterThanOutsetDistance ) outsetBoundingLoop.loop = getSimplifiedInsetFromClockwiseLoop( centers[ 0 ], outsetDistance ) return outsetBoundingLoop def isEntirelyInsideAnother( self, anotherBoundingLoop ): "Determine if this bounding loop is entirely inside another bounding loop." if self.minimum.imag < anotherBoundingLoop.minimum.imag or self.minimum.real < anotherBoundingLoop.minimum.real: return False if self.maximum.imag > anotherBoundingLoop.maximum.imag or self.maximum.real > anotherBoundingLoop.maximum.real: return False for point in self.loop: if euclidean.getNumberOfIntersectionsToLeft( point, anotherBoundingLoop.loop ) % 2 == 0: return False return not isLoopIntersectingLoop( anotherBoundingLoop.loop, self.loop ) #later check for intersection on only acute angles def isOverlappingAnother( self, anotherBoundingLoop ): "Determine if this bounding loop is intersecting another bounding loop." 
if self.isRectangleMissingAnother( anotherBoundingLoop ): return False for point in self.loop: if euclidean.getNumberOfIntersectionsToLeft( point, anotherBoundingLoop.loop ) % 2 == 1: return True for point in anotherBoundingLoop.loop: if euclidean.getNumberOfIntersectionsToLeft( point, self.loop ) % 2 == 1: return True return isLoopIntersectingLoop( anotherBoundingLoop.loop, self.loop ) #later check for intersection on only acute angles def isRectangleMissingAnother( self, anotherBoundingLoop ): "Determine if the rectangle of this bounding loop is missing the rectangle of another bounding loop." if self.maximum.imag < anotherBoundingLoop.minimum.imag or self.maximum.real < anotherBoundingLoop.minimum.real: return True return self.minimum.imag > anotherBoundingLoop.maximum.imag or self.minimum.real > anotherBoundingLoop.maximum.real class CircleIntersection: "An intersection of two complex circles." def __init__( self, circleNodeAhead, index, circleNodeBehind ): self.circleNodeAhead = circleNodeAhead self.circleNodeBehind = circleNodeBehind self.index = index self.steppedOn = False def __repr__( self ): "Get the string representation of this CircleIntersection." 
return '%s, %s, %s, %s, %s' % ( self.index, self.getAbsolutePosition(), self.circleNodeBehind.index, self.circleNodeAhead.index, self.getCircleIntersectionAhead().index ) def addToList( self, circleIntersectionPath ): self.steppedOn = True circleIntersectionPath.append( self ) def getAbsolutePosition( self ): return self.getPositionRelativeToBehind() + self.circleNodeBehind.circle def getCircleIntersectionAhead( self ): circleIntersections = self.circleNodeAhead.circleIntersections circleIntersectionAhead = None smallestWiddershinsDot = 999999999.0 positionRelativeToAhead = self.getAbsolutePosition() - self.circleNodeAhead.circle positionRelativeToAhead = euclidean.getNormalized( positionRelativeToAhead ) for circleIntersection in circleIntersections: if not circleIntersection.steppedOn: circleIntersectionRelative = circleIntersection.getPositionRelativeToBehind() circleIntersectionRelative = euclidean.getNormalized( circleIntersectionRelative ) widdershinsDot = euclidean.getWiddershinsDotGiven( positionRelativeToAhead, circleIntersectionRelative ) if widdershinsDot < smallestWiddershinsDot: smallestWiddershinsDot = widdershinsDot circleIntersectionAhead = circleIntersection if circleIntersectionAhead == None: print( 'this should never happen, circleIntersectionAhead in intercircle is None' ) print( self.circleNodeAhead.circle ) for circleIntersection in circleIntersections: print( circleIntersection.circleNodeAhead.circle ) return circleIntersectionAhead def getPositionRelativeToBehind( self ): aheadMinusBehind = 0.5 * ( self.circleNodeAhead.circle - self.circleNodeBehind.circle ) radius = self.circleNodeAhead.radius halfChordWidth = math.sqrt( radius * radius - aheadMinusBehind.real * aheadMinusBehind.real - aheadMinusBehind.imag * aheadMinusBehind.imag ) rotatedClockwiseQuarter = complex( aheadMinusBehind.imag, - aheadMinusBehind.real ) if abs( rotatedClockwiseQuarter ) == 0: print( self.circleNodeAhead.circle ) print( self.circleNodeBehind.circle ) return 
aheadMinusBehind + rotatedClockwiseQuarter * ( halfChordWidth / abs( rotatedClockwiseQuarter ) ) def isWithinCircles( self, pixelTable, width ): absolutePosition = self.getAbsolutePosition() absolutePositionOverWidth = absolutePosition / width x = int( round( absolutePositionOverWidth.real ) ) y = int( round( absolutePositionOverWidth.imag ) ) squareValues = euclidean.getSquareValues( pixelTable, x, y ) for squareValue in squareValues: if abs( squareValue.circle - absolutePosition ) < self.circleNodeAhead.radius: if squareValue != self.circleNodeAhead and squareValue != self.circleNodeBehind: return True return False class CircleNode: "A complex node of complex circle intersections." def __init__( self, circle, index, radius ): self.circle = circle self.circleIntersections = [] self.diameter = radius + radius self.index = index self.radius = radius def __repr__( self ): "Get the string representation of this CircleNode." return '%s, %s' % ( self.index, self.circle ) def getWithinNodes( self, pixelTable, width ): circleOverWidth = self.circle / width x = int( round( circleOverWidth.real ) ) y = int( round( circleOverWidth.imag ) ) withinNodes = [] squareValues = euclidean.getSquareValues( pixelTable, x, y ) for squareValue in squareValues: if abs( self.circle - squareValue.circle ) < self.diameter: withinNodes.append( squareValue ) return withinNodes
gpl-2.0
jason0x43/jc-nest
nest.py
1
11609
#!/usr/bin/env python import json import requests import logging import ssl from datetime import datetime from os.path import exists, expanduser, dirname from os import makedirs, remove from requests.adapters import HTTPAdapter from requests.packages.urllib3.poolmanager import PoolManager LOG = logging.getLogger(__name__) default_cache_dir = expanduser('~/.nest') login_url = 'https://home.nest.com/user/login' user_agent = 'Nest/2.1.3 CFNetwork/548.0.4' class TlsAdapter(HTTPAdapter): def init_poolmanager(self, connections, maxsize, block=False): self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, block=block, ssl_version=ssl.PROTOCOL_TLSv1) class FailedRequest(Exception): def __init__(self, message, response): super(FailedRequest, self).__init__(message) self.response = response class NotAuthenticated(Exception): def __init__(self, message): super(NotAuthenticated, self).__init__(message) class Nest(object): def __init__(self, id, structure): '''Initialize this Nest.''' self._id = str(id) self._structure = structure self._account = structure.account @property def account(self): return self._account @property def structure(self): return self._structure @property def name(self): return self.account.status['shared'][self.id]['name'] @property def id(self): return self._id @property def scale(self): return self.account.status['device'][self.id]['temperature_scale'] @property def ip(self): return self.account.status['metadata'][self.id]['last_ip'] @property def humidity(self): return self.account.status['device'][self.id]['current_humidity'] @property def temperature(self): temp = self.account.status['shared'][self.id]['current_temperature'] if self.scale == 'F': temp = (temp * 1.8) + 32 return temp @property def leaf(self): return self.account.status['device'][self.id]['leaf'] @property def mode(self): mode = self.account.status['device'][self.id][ 'current_schedule_mode'] return mode.lower() @mode.setter def mode(self, mode): mode = mode.upper() data 
= {'device': {self.id: {'current_schedule_mode': mode}}} self.account.request('POST', 'put', data=data) self.account.status['device'][self.id]['current_schedule_mode'] = mode @property def fan(self): return self.account.status['device'][self.id]['fan_mode'] @fan.setter def fan(self, mode): if mode not in ('auto', 'on'): raise Exception('Invalid fan mode "{}". Must be "auto" or ' '"on"'.format(mode)) data = {'device': {self.id: {'fan_mode': mode}}} self.account.request('POST', 'put', data=data) self.account.status['device'][self.id]['fan_mode'] = mode @property def target_temperature(self): shared = self.account.status['shared'][self.id] if self.mode == 'range': temp = [shared['target_temperature_low'], shared['target_temperature_high']] if self.scale == 'F': temp = [(t * 1.8) + 32 for t in temp] else: temp = shared['target_temperature'] if self.scale == 'F': temp = (temp * 1.8) + 32 return temp @target_temperature.setter def target_temperature(self, temp): if isinstance(temp, (list, tuple)): # temp is (low, high) lo_and_hi = [float(t) for t in temp] if lo_and_hi[1] - lo_and_hi[0] < 3.0: raise Exception('High and low temperatures are too close') if self.scale == 'F': lo_and_hi = [(t - 32) / 1.8 for t in lo_and_hi] data = { 'target_temperature_low': lo_and_hi[0], 'target_temperature_high': lo_and_hi[1], } else: temp = float(temp) if self.scale == 'F': temp = (temp - 32) / 1.8 data = { 'target_change_pending': True, 'target_temperature': temp } self.account.request('POST', 'put/shared.{}'.format(self.id), data=data) shared = self.account.status['shared'][self.id] if isinstance(temp, (list, tuple)): shared['target_temperature_low'] = lo_and_hi[0] shared['target_temperature_high'] = lo_and_hi[1] else: shared['target_temperature'] = temp class Structure(object): def __init__(self, structure_id, account): '''Initialize this structure.''' self._account = account self._id = structure_id self._nests = None @property def account(self): return self._account @property def 
id(self): return self._id @property def name(self): return self.account.status['structure'][self.id]['name'] @property def nests(self): if self._nests is None: nests = {} for dev in self.account.status['structure'][self.id]['devices']: id = dev.split('.')[1] nests[id] = Nest(id, self) self._nests = nests return self._nests @property def location(self): return self.account.status['structure'][self.id]['postal_code'] @property def weather(self): url = '{}{}'.format(self.account.session['urls']['weather_url'], self.location) return requests.get(url).json()[self.location] # away ############################### @property def away(self): return self.account.status['structure'][self.id]['away'] @away.setter def away(self, value): from time import time value = bool(value) data = { 'away_timestamp': int(time()), 'away': value, 'away_setter': 0 } self.account.request('POST', 'put/structure.{}'.format(self.id), data=data) self.account.status['structure'][self.id]['away'] = value class Account(object): def __init__(self, cache_dir=None): '''Initialize this nest interface.''' if cache_dir is None: cache_dir = default_cache_dir self._session_file = '{}/session.json'.format(cache_dir) self._status = None self._structures = None self._nests = None self._session = None @property def status(self): if self._status is None: r = self.request('GET', 'mobile/user.{}'.format(self.user_id)) self._status = r.json() return self._status @property def structures(self): if self._structures is None: structures = {} user_structs = self.status['user'][self.user_id]['structures'] LOG.debug('structs: %s', user_structs) for struct in user_structs: id = struct.split('.')[1] structures[id] = Structure(id, self) self._structures = structures return self._structures @property def nests(self): if self._nests is None: nests = {} for struct in self.structures.values(): for id, nest in struct.nests.items(): nests[id] = nest self._nests = nests return self._nests @property def user_id(self): return 
self.session['userid'] @property def session(self): return self._session @property def has_session(self): try: with open(self._session_file, 'rt') as sfile: self._session = json.load(sfile) expiry = datetime.strptime(self.session['expires_in'], '%a, %d-%b-%Y %H:%M:%S GMT') if datetime.utcnow() <= expiry: return True except Exception: LOG.exception('missing or corrupt session file') return False def clear_session(self): '''Delete the session file''' remove(self._session_file) def login(self, email, password): '''Login to the user's Nest account.''' # make the cache dir if it doesn't exist cache_dir = dirname(self._session_file) if not exists(cache_dir): makedirs(cache_dir) # authenticate with Nest and save the returned session data res = requests.post(login_url, {'username': email, 'password': password}) if res.status_code != 200: return False session = res.json() with open(self._session_file, 'wt') as sfile: json.dump(session, sfile, indent=2) self._session = session return True def request(self, method='GET', path='', data=None): '''GET from or POST to a user's Nest account. This function requires a valid session to exist. 
''' # check that we have a valid session if not self.has_session: raise NotAuthenticated('No session -- login first') #from requests.utils import cookiejar_from_dict self._requestor = requests.Session() self._requestor.mount('https://', TlsAdapter()) self._requestor.headers.update({ 'User-Agent': user_agent, 'Authorization': 'Basic ' + self.session['access_token'], 'X-nl-user-id': self.session['userid'], 'X-nl-protocol-version': '1', 'Accept-Language': 'en-us', 'Connection': 'keep-alive', 'Accept': '*/*' }) base_url = '{}/v2'.format(self.session['urls']['transport_url']) url = '{}/{}'.format(base_url, path) if method == 'GET': LOG.info('GETting %s', url) # don't put headers it a status request if not url.endswith('.json'): r = self._requestor.get(url) else: r = requests.get(url) elif method == 'POST': if not isinstance(data, (str, unicode)): # convert data dicts to JSON strings data = json.dumps(data) r = self._requestor.post(url, data=data) else: raise Exception('Invalid method "{}"'.format(method)) if r.status_code != 200: raise FailedRequest('Request failed', r) return r if __name__ == '__main__': from argparse import ArgumentParser parser = ArgumentParser() parser.add_argument('property', help='Property to get or set', choices=('ip', 'status', 'temperature', 'humidity', 'target_temperature', 'away', 'leaf', 'weather')) parser.add_argument('value', nargs='?', help='Value to set') args = parser.parse_args() nest = Nest() from pprint import pprint if hasattr(nest, args.property): pprint(getattr(nest, args.property)) elif args.property in globals(): globals()[args.property]() if args.value: print 'Setting {} to {}'.format(args.property, args.value) setattr(nest, args.property, args.value)
mit
nhejazi/scikit-learn
sklearn/neighbors/setup.py
50
1460
import os


def configuration(parent_package='', top_path=None):
    """Return the numpy.distutils build configuration for sklearn.neighbors.

    Declares the Cython extension modules of the subpackage and registers
    the ``tests`` subpackage.
    """
    import numpy
    from numpy.distutils.misc_util import Configuration

    config = Configuration('neighbors', parent_package, top_path)

    # Link against libm on POSIX platforms.
    libraries = []
    if os.name == 'posix':
        libraries.append('m')

    numpy_include = numpy.get_include()

    # (extension name, include directories); dist_metrics additionally needs
    # the numpy "numpy" header subdirectory.
    ext_specs = (
        ('ball_tree', [numpy_include]),
        ('kd_tree', [numpy_include]),
        ('dist_metrics', [numpy_include,
                          os.path.join(numpy_include, 'numpy')]),
        ('typedefs', [numpy_include]),
        ('quad_tree', [numpy_include]),
    )
    for ext_name, include_dirs in ext_specs:
        config.add_extension(ext_name,
                             sources=['%s.pyx' % ext_name],
                             include_dirs=include_dirs,
                             libraries=libraries)

    config.add_subpackage('tests')

    return config
bsd-3-clause
feinheit/feincms-in-a-box
fbox/$PROJECT_NAME/tools/mail.py
2
1606
from __future__ import absolute_import, unicode_literals

from django.core.mail import EmailMultiAlternatives
from django.template.loader import TemplateDoesNotExist, render_to_string


def render_to_mail(template, context, **kwargs):
    """
    Renders a mail and returns the resulting ``EmailMultiAlternatives``
    instance

    * ``template``: The base name of the text and HTML (optional) version of
      the mail.
    * ``context``: The context used to render the mail. This context instance
      should contain everything required.
    * Additional keyword arguments are passed to the
      ``EmailMultiAlternatives`` instantiation. Use those to specify the
      ``to``, ``headers`` etc. arguments.

    Usage example::

        # Render the template myproject/hello_mail.txt (first non-empty line
        # contains the subject, third to last the body) and optionally the
        # template myproject/hello_mail.html containing the alternative HTML
        # representation.
        message = render_to_mail('myproject/hello_mail', {}, to=[email])
        message.send()
    """
    lines = iter(render_to_string('%s.txt' % template, context).splitlines())

    # The first non-empty line is the subject. Use a plain for loop instead
    # of `while True: next(lines)` so an all-empty rendering yields an empty
    # subject instead of leaking StopIteration to the caller.
    subject = ''
    for line in lines:
        if line:
            subject = line
            break

    # Everything after the subject line forms the plain-text body.
    body = '\n'.join(lines).strip('\n')

    message = EmailMultiAlternatives(subject=subject, body=body, **kwargs)

    # The HTML alternative is optional; its absence is not an error.
    try:
        message.attach_alternative(
            render_to_string('%s.html' % template, context),
            'text/html')
    except TemplateDoesNotExist:
        pass
    return message
bsd-3-clause
zwernberg/squatbot
config/settings/production.py
1
7518
# -*- coding: utf-8 -*- """ Production Configurations - Use Amazon's S3 for storing static files and uploaded media - Use mailgun to send emails - Use Redis for cache """ from __future__ import absolute_import, unicode_literals from boto.s3.connection import OrdinaryCallingFormat from django.utils import six from .common import * # noqa # SECRET CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key # Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ SECRET_KEY = env('DJANGO_SECRET_KEY') # This ensures that Django will be able to detect a secure connection # properly on Heroku. SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # SECURITY CONFIGURATION # ------------------------------------------------------------------------------ # See https://docs.djangoproject.com/en/1.9/ref/middleware/#module-django.middleware.security # and https://docs.djangoproject.com/ja/1.9/howto/deployment/checklist/#run-manage-py-check-deploy # set this to 60 seconds and then to 518400 when you can prove it works SECURE_HSTS_SECONDS = 60 SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool( 'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True) SECURE_CONTENT_TYPE_NOSNIFF = env.bool( 'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True) SECURE_BROWSER_XSS_FILTER = True SESSION_COOKIE_SECURE = True SESSION_COOKIE_HTTPONLY = True SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True) CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True X_FRAME_OPTIONS = 'DENY' # SITE CONFIGURATION # ------------------------------------------------------------------------------ # Hosts/domain names that are valid for this site # See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['squatbot.me']) # END SITE CONFIGURATION INSTALLED_APPS += ('gunicorn', ) # STORAGE CONFIGURATION # 
------------------------------------------------------------------------------ # Uploaded Media Files # ------------------------ # See: http://django-storages.readthedocs.io/en/latest/index.html INSTALLED_APPS += ( 'storages', ) AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID') AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY') AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME') AWS_AUTO_CREATE_BUCKET = True AWS_QUERYSTRING_AUTH = False AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat() # AWS cache settings, don't change unless you know what you're doing: AWS_EXPIRY = 60 * 60 * 24 * 7 # TODO See: https://github.com/jschneier/django-storages/issues/47 # Revert the following and use str after the above-mentioned bug is fixed in # either django-storage-redux or boto AWS_HEADERS = { 'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % ( AWS_EXPIRY, AWS_EXPIRY)) } # URL that handles the media served from MEDIA_ROOT, used for managing # stored files. # See:http://stackoverflow.com/questions/10390244/ from storages.backends.s3boto import S3BotoStorage StaticRootS3BotoStorage = lambda: S3BotoStorage(location='static') MediaRootS3BotoStorage = lambda: S3BotoStorage(location='media') DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage' MEDIA_URL = 'https://s3.amazonaws.com/%s/media/' % AWS_STORAGE_BUCKET_NAME # Static Assets # ------------------------ STATIC_URL = 'https://s3.amazonaws.com/%s/static/' % AWS_STORAGE_BUCKET_NAME STATICFILES_STORAGE = 'config.settings.production.StaticRootS3BotoStorage' # See: https://github.com/antonagestam/collectfast # For Django 1.7+, 'collectfast' should come before # 'django.contrib.staticfiles' AWS_PRELOAD_METADATA = True INSTALLED_APPS = ('collectfast', ) + INSTALLED_APPS # EMAIL # ------------------------------------------------------------------------------ DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL', default='Squatbot <noreply@squatbot.me>') EMAIL_SUBJECT_PREFIX = 
env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[Squatbot] ') SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL) # Anymail with Mailgun INSTALLED_APPS += ("anymail", ) ANYMAIL = { "MAILGUN_API_KEY": env('DJANGO_MAILGUN_API_KEY'), "MAILGUN_SENDER_DOMAIN": env('MAILGUN_SENDER_DOMAIN') } EMAIL_BACKEND = "anymail.backends.mailgun.MailgunBackend" # TEMPLATE CONFIGURATION # ------------------------------------------------------------------------------ # See: # https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader TEMPLATES[0]['OPTIONS']['loaders'] = [ ('django.template.loaders.cached.Loader', [ 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]), ] # DATABASE CONFIGURATION # ------------------------------------------------------------------------------ # Use the Heroku-style specification # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ DATABASES['default'] = env.db('DATABASE_URL') # CACHING # ------------------------------------------------------------------------------ REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0) # Heroku URL does not pass the DB number, so we parse it in CACHES = { 'default': { 'BACKEND': 'django_redis.cache.RedisCache', 'LOCATION': REDIS_LOCATION, 'OPTIONS': { 'CLIENT_CLASS': 'django_redis.client.DefaultClient', 'IGNORE_EXCEPTIONS': True, # mimics memcache behavior. # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior } } } # LOGGING CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#logging # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. 
# See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s ' '%(process)d %(thread)d %(message)s' }, }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'verbose', }, }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True }, 'django.security.DisallowedHost': { 'level': 'ERROR', 'handlers': ['console', 'mail_admins'], 'propagate': True } } } # Custom Admin URL, use {% url 'admin:index' %} ADMIN_URL = env('DJANGO_ADMIN_URL') # Your production stuff: Below this line define 3rd party library settings # ------------------------------------------------------------------------------
mit
ahu-odoo/odoo
addons/gamification/wizard/update_goal.py
386
1848
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> # ############################################################################## from openerp.osv import fields, osv class goal_manual_wizard(osv.TransientModel): """Wizard to update a manual goal""" _name = 'gamification.goal.wizard' _columns = { 'goal_id': fields.many2one("gamification.goal", string='Goal', required=True), 'current': fields.float('Current'), } def action_update_current(self, cr, uid, ids, context=None): """Wizard action for updating the current value""" goal_obj = self.pool.get('gamification.goal') for wiz in self.browse(cr, uid, ids, context=context): towrite = { 'current': wiz.current, 'goal_id': wiz.goal_id.id, 'to_update': False, } goal_obj.write(cr, uid, [wiz.goal_id.id], towrite, context=context) goal_obj.update(cr, uid, [wiz.goal_id.id], context=context) return {}
agpl-3.0
codingkevin/suds
tests/test_input_parameters.py
1
24975
# -*- coding: utf-8 -*- # This program is free software; you can redistribute it and/or modify it under # the terms of the (LGPL) GNU Lesser General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Library Lesser General Public License # for more details at ( http://www.gnu.org/licenses/lgpl.html ). # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software Foundation, Inc., # 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # written by: Jurko Gospodnetić ( jurko.gospodnetic@pke.hr ) """ Suds Python library web service operation input parameter related unit tests. Suds provides the user with an option to automatically 'hide' wrapper elements around simple types and allow the user to specify such parameters without explicitly creating those wrappers. For example: operation taking a parameter of type X, where X is a sequence containing only a single simple data type (e.g. string or integer) will be callable by directly passing it that internal simple data type value instead of first wrapping that value in an object of type X and then passing that wrapper object instead. Unit tests in this module make sure suds recognizes an operation's input parameters in different scenarios as expected. It does not deal with binding given argument values to an operation's input parameters or constructing an actual binding specific web service operation invocation request, although they may use such functionality as tools indicating that suds recognized an operation's input parameters correctly. 
""" import testutils if __name__ == "__main__": testutils.run_using_pytest(globals()) import suds import pytest class Element: """Represents elements in our XSD map test data.""" def __init__(self, name): self.name = name class XSDType: """Unwrapped parameter XSD type test data.""" def __init__(self, xsd, xsd_map): self.xsd = xsd self.xsd_map = xsd_map # Test data shared between different tests in this module. choice_choice = XSDType("""\ <xsd:complexType> <xsd:sequence> <xsd:choice> <xsd:element name="aString1" type="xsd:string" /> <xsd:element name="anInteger1" type="xsd:integer" /> </xsd:choice> <xsd:choice> <xsd:element name="aString2" type="xsd:string" /> <xsd:element name="anInteger2" type="xsd:integer" minOccurs="0" /> </xsd:choice> </xsd:sequence> </xsd:complexType>""", [ "complex_type", [ "sequence", [ "choice_1", [ Element("aString1"), Element("anInteger1")], "choice_2", [ Element("aString2"), Element("anInteger2")]]]]) choice_element_choice = XSDType("""\ <xsd:complexType> <xsd:sequence> <xsd:choice> <xsd:element name="aString1" type="xsd:string" /> <xsd:element name="anInteger1" type="xsd:integer" /> </xsd:choice> <xsd:element name="separator" type="xsd:string" /> <xsd:choice> <xsd:element name="aString2" type="xsd:string" /> <xsd:element name="anInteger2" type="xsd:integer" minOccurs="0" /> </xsd:choice> </xsd:sequence> </xsd:complexType>""", [ "complex_type", [ "sequence", [ "choice_1", [ Element("aString1"), Element("anInteger1")], Element("separator"), "choice_2", [ Element("aString2"), Element("anInteger2")]]]]) choice_simple_nonoptional = XSDType("""\ <xsd:complexType> <xsd:choice> <xsd:element name="aString" type="xsd:string" /> <xsd:element name="anInteger" type="xsd:integer" /> </xsd:choice> </xsd:complexType>""", [ "complex_type", [ "choice", [ Element("aString"), Element("anInteger")]]]) choice_with_element_and_two_element_sequence = XSDType("""\ <xsd:complexType> <xsd:choice> <xsd:element name="a" type="xsd:integer" /> <xsd:sequence> 
<xsd:element name="b1" type="xsd:integer" /> <xsd:element name="b2" type="xsd:integer" /> </xsd:sequence> </xsd:choice> </xsd:complexType>""", [ "complex_type", [ "choice", [ Element("a"), "sequence", [ Element("b1"), Element("b2")]]]]) empty_sequence = XSDType("""\ <xsd:complexType> <xsd:sequence /> </xsd:complexType>""", [ "complex_type", [ "sequence"]]) sequence_choice_with_element_and_two_element_sequence = XSDType("""\ <xsd:complexType> <xsd:sequence> <xsd:choice> <xsd:element name="a" type="xsd:integer" /> <xsd:sequence> <xsd:element name="b1" type="xsd:integer" /> <xsd:element name="b2" type="xsd:integer" /> </xsd:sequence> </xsd:choice> </xsd:sequence> </xsd:complexType>""", [ "complex_type", [ "sequence_1", [ "choice", [ Element("a"), "sequence_2", [ Element("b1"), Element("b2")]]]]]) sequence_with_five_elements = XSDType("""\ <xsd:complexType> <xsd:sequence> <xsd:element name="p1" type="xsd:string" /> <xsd:element name="p2" type="xsd:integer" /> <xsd:element name="p3" type="xsd:string" /> <xsd:element name="p4" type="xsd:integer" /> <xsd:element name="p5" type="xsd:string" /> </xsd:sequence> </xsd:complexType>""", [ "complex_type", [ "sequence", [ Element("p1"), Element("p2"), Element("p3"), Element("p4"), Element("p5")]]]) sequence_with_one_element = XSDType("""\ <xsd:complexType> <xsd:sequence> <xsd:element name="param" type="xsd:integer" /> </xsd:sequence> </xsd:complexType>""", [ "complex_type", [ "sequence", [ Element("param")]]]) sequence_with_two_elements = XSDType("""\ <xsd:complexType> <xsd:sequence> <xsd:element name="aString" type="xsd:string" /> <xsd:element name="anInteger" type="xsd:integer" /> </xsd:sequence> </xsd:complexType>""", [ "complex_type", [ "sequence", [ Element("aString"), Element("anInteger")]]]) class TestUnsupportedParameterDefinitions: """ Tests performed on WSDL schema's containing input parameter type definitions that can not be modeled using the currently implemented suds library input parameter definition structure. 
The tests included in this group, most of which are expected to fail, should serve as an illustration of what type of input parameter definitions still need to be better modeled. Once this has been done, they should be refactored into separate argument parsing, input parameter definition structure and binding specific request construction tests. """ def expect_error(self, expected_error_text, *args, **kwargs): """ Assert a test function call raises an expected TypeError exception. Caught exception is considered expected if its string representation matches the given expected error text. Expected error text may be given directly or as a list/tuple containing valid alternatives. Web service operation 'f' invoker is used as the default test function. An alternate test function may be specified using the 'test_function' keyword argument. """ try: test_function = kwargs.pop("test_function") except KeyError: test_function = self.service.f e = pytest.raises(TypeError, test_function, *args, **kwargs).value try: if expected_error_text.__class__ in (list, tuple): assert str(e) in expected_error_text else: assert str(e) == expected_error_text finally: del e # explicitly break circular reference chain in Python 3 def init_function_params(self, params, **kwargs): """ Initialize a test in this group with the given parameter definition. Constructs a complete WSDL schema based on the given function parameter definition (defines a single web service operation named 'f' by default), and creates a suds Client object to be used for testing suds's web service operation invocation. An alternate operation name may be given using the 'operation_name' keyword argument. May only be invoked once per test. 
""" input = '<xsd:element name="Wrapper">%s</xsd:element>' % (params,) assert not hasattr(self, "service") wsdl = testutils.wsdl(input, input="Wrapper", **kwargs) client = testutils.client_from_wsdl(wsdl, nosend=True) self.service = client.service @pytest.mark.parametrize("test_args_required", ( pytest.mark.xfail(reason="empty choice member items not supported")( True), False)) def test_choice_containing_an_empty_sequence(self, test_args_required): """ Test reporting extra input parameters passed to a function taking a choice parameter group containing an empty sequence subgroup. """ self.init_function_params("""\ <xsd:complexType> <xsd:choice> <xsd:element name="a" type="xsd:integer" /> <xsd:sequence> </xsd:sequence> </xsd:choice> </xsd:complexType>""") expected = "f() takes 0 to 1 positional arguments but 3 were given" if not test_args_required: expected = [expected, "f() takes 1 positional argument but 3 were given"] self.expect_error(expected, 1, None, None) @pytest.mark.parametrize("choice", ( # Explicitly marked as optional and containing only non-optional # elements. pytest.mark.xfail(reason="suds does not yet support minOccurs/" "maxOccurs attributes on all/choice/sequence order indicators")( """\ <xsd:complexType> <xsd:choice minOccurs="0"> <xsd:element name="aString" type="xsd:string" /> <xsd:element name="anInteger" type="xsd:integer" /> </xsd:choice> </xsd:complexType>"""), # Explicitly marked as optional and containing at least one # non-optional element. 
"""\ <xsd:complexType> <xsd:choice minOccurs="0"> <xsd:element name="aString" type="xsd:string" minOccurs="0" /> <xsd:element name="anInteger" type="xsd:integer" /> </xsd:choice> </xsd:complexType>""", """\ <xsd:complexType> <xsd:choice minOccurs="0"> <xsd:element name="aString" type="xsd:string" /> <xsd:element name="anInteger" type="xsd:integer" minOccurs="0" /> </xsd:choice> </xsd:complexType>""", """\ <xsd:complexType> <xsd:choice minOccurs="0"> <xsd:element name="aString" type="xsd:string" minOccurs="0" /> <xsd:element name="anInteger" type="xsd:integer" minOccurs="0" /> </xsd:choice> </xsd:complexType>""")) def test_choice_explicitly_marked_as_optional(self, choice): """ Test reporting extra input parameters passed to a function taking a single optional choice parameter group. """ self.init_function_params(choice) expected = "f() takes 0 to 2 positional arguments but 3 were given" self.expect_error(expected, "one", None, 3) @pytest.mark.parametrize("part_name", ("uno", "due", "quatro")) def test_builtin_typed_element_parameter(part_name): """ Test correctly recognizing web service operation input structure defined by a built-in typed element. 
""" wsdl = suds.byte_str("""\ <?xml version='1.0' encoding='UTF-8'?> <wsdl:definitions targetNamespace="my-namespace" xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/" xmlns:ns="my-namespace" xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"> <wsdl:types> <xsd:schema targetNamespace="my-namespace" elementFormDefault="qualified" attributeFormDefault="unqualified" xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="MyElement" type="xsd:integer" /> </xsd:schema> </wsdl:types> <wsdl:message name="fRequestMessage"> <wsdl:part name="%s" element="ns:MyElement" /> </wsdl:message> <wsdl:portType name="dummyPortType"> <wsdl:operation name="f"> <wsdl:input message="ns:fRequestMessage" /> </wsdl:operation> </wsdl:portType> <wsdl:binding name="dummy" type="ns:dummyPortType"> <soap:binding style="document" transport="http://schemas.xmlsoap.org/soap/http" /> <wsdl:operation name="f"> <soap:operation soapAction="my-soap-action" style="document" /> <wsdl:input><soap:body use="literal" /></wsdl:input> </wsdl:operation> </wsdl:binding> <wsdl:service name="dummy"> <wsdl:port name="dummy" binding="ns:dummy"> <soap:address location="unga-bunga-location" /> </wsdl:port> </wsdl:service> </wsdl:definitions>""" % (part_name,)) client = testutils.client_from_wsdl(wsdl, nosend=True) # Collect references to required WSDL model content. method = client.wsdl.services[0].ports[0].methods["f"] assert not method.soap.input.body.wrapped binding = method.binding.input assert binding.__class__ is suds.bindings.document.Document my_element = client.wsdl.schema.elements["MyElement", "my-namespace"] param_defs = binding.param_defs(method) _expect_params(param_defs, [("MyElement", my_element)]) @pytest.mark.parametrize("part_name", ("parameters", "pipi")) def test_explicitly_wrapped_parameter(part_name): """ Test correctly recognizing explicitly wrapped web service operation input structure which would otherwise be automatically unwrapped. 
""" input_schema = sequence_choice_with_element_and_two_element_sequence.xsd wsdl = _unwrappable_wsdl(part_name, input_schema) client = testutils.client_from_wsdl(wsdl, nosend=True, unwrap=False) # Collect references to required WSDL model content. method = client.wsdl.services[0].ports[0].methods["f"] assert not method.soap.input.body.wrapped binding = method.binding.input assert binding.__class__ is suds.bindings.document.Document wrapper = client.wsdl.schema.elements["Wrapper", "my-namespace"] param_defs = binding.param_defs(method) _expect_params(param_defs, [("Wrapper", wrapper)]) @pytest.mark.parametrize("param_names", ( [], ["parameters"], ["pipi"], ["fifi", "la", "fuff"])) def test_typed_parameters(param_names): """ Test correctly recognizing web service operation input structure defined with 0 or more typed input message part parameters. """ wsdl = ["""\ <?xml version='1.0' encoding='UTF-8'?> <wsdl:definitions targetNamespace="my-namespace" xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/" xmlns:ns="my-namespace" xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"> <wsdl:types> <xsd:schema targetNamespace="my-namespace" elementFormDefault="qualified" attributeFormDefault="unqualified" xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:complexType name="MyType"> <xsd:sequence> <xsd:element name="a" type="xsd:integer" /> </xsd:sequence> </xsd:complexType> </xsd:schema> </wsdl:types> <wsdl:message name="fRequestMessage">"""] for x in param_names: part_def = '\n <wsdl:part name="%s" type="ns:MyType" />' % (x,) wsdl.append(part_def) wsdl.append(""" </wsdl:message> <wsdl:portType name="dummyPortType"> <wsdl:operation name="f"> <wsdl:input message="ns:fRequestMessage" /> </wsdl:operation> </wsdl:portType> <wsdl:binding name="dummy" type="ns:dummyPortType"> <soap:binding style="document" transport="http://schemas.xmlsoap.org/soap/http" /> <wsdl:operation name="f"> <soap:operation soapAction="my-soap-action" style="document" /> <wsdl:input><soap:body use="literal" 
/></wsdl:input> </wsdl:operation> </wsdl:binding> <wsdl:service name="dummy"> <wsdl:port name="dummy" binding="ns:dummy"> <soap:address location="unga-bunga-location" /> </wsdl:port> </wsdl:service> </wsdl:definitions>""") wsdl = suds.byte_str("".join(wsdl)) client = testutils.client_from_wsdl(wsdl, nosend=True) # Collect references to required WSDL model content. method = client.wsdl.services[0].ports[0].methods["f"] assert not method.soap.input.body.wrapped binding = method.binding.input assert binding.__class__ is suds.bindings.document.Document my_type = client.wsdl.schema.types["MyType", "my-namespace"] # Construct expected parameter definitions. expected_param_defs = [ (param_name, [suds.bindings.binding.PartElement, param_name, my_type]) for param_name in param_names] param_defs = binding.param_defs(method) _expect_params(param_defs, expected_param_defs) @pytest.mark.parametrize("xsd_type", ( choice_choice, choice_element_choice, choice_simple_nonoptional, choice_with_element_and_two_element_sequence, empty_sequence, sequence_choice_with_element_and_two_element_sequence, sequence_with_five_elements, sequence_with_one_element, sequence_with_two_elements)) def test_unwrapped_parameter(xsd_type): """Test recognizing unwrapped web service operation input structures.""" input_schema = sequence_choice_with_element_and_two_element_sequence.xsd wsdl = _unwrappable_wsdl("part_name", input_schema) client = testutils.client_from_wsdl(wsdl, nosend=True) # Collect references to required WSDL model content. method = client.wsdl.services[0].ports[0].methods["f"] assert method.soap.input.body.wrapped binding = method.binding.input assert binding.__class__ is suds.bindings.document.Document wrapper = client.wsdl.schema.elements["Wrapper", "my-namespace"] # Construct expected parameter definitions. 
xsd_map = sequence_choice_with_element_and_two_element_sequence.xsd_map expected_param_defs = _parse_schema_model(wrapper, xsd_map) param_defs = binding.param_defs(method) _expect_params(param_defs, expected_param_defs) @pytest.mark.parametrize("part_name", ("parameters", "pipi")) def test_unwrapped_parameter_part_name(part_name): """ Unwrapped parameter's part name should not affect its parameter definition. """ input_schema = sequence_choice_with_element_and_two_element_sequence.xsd wsdl = _unwrappable_wsdl(part_name, input_schema) client = testutils.client_from_wsdl(wsdl, nosend=True) # Collect references to required WSDL model content. method = client.wsdl.services[0].ports[0].methods["f"] assert method.soap.input.body.wrapped binding = method.binding.input assert binding.__class__ is suds.bindings.document.Document wrapper = client.wsdl.schema.elements["Wrapper", "my-namespace"] # Construct expected parameter definitions. xsd_map = sequence_choice_with_element_and_two_element_sequence.xsd_map expected_param_defs = _parse_schema_model(wrapper, xsd_map) param_defs = binding.param_defs(method) _expect_params(param_defs, expected_param_defs) def _expect_params(param_defs, expected_param_defs): """ Assert the given parameter definition content. Given expected parameter definition content may contain the expected parameter type instance or it may contain a list/tuple describing the type instead. Type description list/tuple is expected to contain the following: 1. type object's class reference 2. type object's 'name' attribute value. 3. 
type object's resolved type instance reference """ assert param_defs.__class__ is list assert len(param_defs) == len(expected_param_defs) for pdef, expected_pdef in zip(param_defs, expected_param_defs): assert len(expected_pdef) in (2, 3), "bad test data" assert pdef[0] == expected_pdef[0] # name if expected_pdef[1].__class__ in (list, tuple): # type - class/name/type instance assert pdef[1].__class__ is expected_pdef[1][0] assert pdef[1].name == expected_pdef[1][1] assert pdef[1].resolve() is expected_pdef[1][2] else: assert pdef[1] is expected_pdef[1] # type - exact instance assert pdef[2:] == expected_pdef[2:] # ancestry - optional def _parse_schema_model(root, schema_model_map): """ Utility function for preparing the expected parameter definition structure based on an unwrapped input parameter's XSD type schema. Parses the XSD schema definition under a given XSD schema item and returns the expected parameter definition structure based on the given schema map. The schema map describes the expected hierarchy of items in the given XSD schema. Even though this information could be deduced from the XSD schema itself, that would require a much more complex implementation and this is supposed to be a simple testing utility. 
""" schema_items = {} param_defs = [] _parse_schema_model_r(schema_items, param_defs, [], root, schema_model_map) return param_defs def _parse_schema_model_r(schema_items, param_defs, ancestry, parent, schema_model_map): """Recursive implementation detail for _parse_schema_model().""" prev = None ancestry = list(ancestry) ancestry.append(parent) n = 0 for x in schema_model_map: if x.__class__ in (list, tuple): assert prev is not None, "bad schema model map" _parse_schema_model_r(schema_items, param_defs, ancestry, prev, x) continue item = parent.rawchildren[n] if isinstance(x, Element): x = x.name prev = None param_defs.append((x, item, ancestry)) else: assert isinstance(x, str), "bad schema model map" prev = item assert x not in schema_items, "duplicate schema map item names" schema_items[x] = item n += 1 assert len(parent.rawchildren) == n def _unwrappable_wsdl(part_name, param_schema): """ Return a WSDL schema byte string. The returned WSDL schema defines a single service definition with a single port containing a single function named 'f' taking automatically unwrappable input parameter using document/literal binding. The input parameter is defined as a single named input message part (name given via the 'part_name' argument) referencing an XSD schema element named 'Wrapper' located in the 'my-namespace' namespace. The wrapper element's type definition (XSD schema string) is given via the 'param_schema' argument. 
""" return suds.byte_str("""\ <?xml version='1.0' encoding='UTF-8'?> <wsdl:definitions targetNamespace="my-namespace" xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/" xmlns:ns="my-namespace" xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"> <wsdl:types> <xsd:schema targetNamespace="my-namespace" elementFormDefault="qualified" attributeFormDefault="unqualified" xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:element name="Wrapper"> %(param_schema)s </xsd:element> </xsd:schema> </wsdl:types> <wsdl:message name="fRequestMessage"> <wsdl:part name="%(part_name)s" element="ns:Wrapper" /> </wsdl:message> <wsdl:portType name="dummyPortType"> <wsdl:operation name="f"> <wsdl:input message="ns:fRequestMessage" /> </wsdl:operation> </wsdl:portType> <wsdl:binding name="dummy" type="ns:dummyPortType"> <soap:binding style="document" transport="http://schemas.xmlsoap.org/soap/http" /> <wsdl:operation name="f"> <soap:operation soapAction="my-soap-action" style="document" /> <wsdl:input><soap:body use="literal" /></wsdl:input> </wsdl:operation> </wsdl:binding> <wsdl:service name="dummy"> <wsdl:port name="dummy" binding="ns:dummy"> <soap:address location="unga-bunga-location" /> </wsdl:port> </wsdl:service> </wsdl:definitions>""" % {"param_schema":param_schema, "part_name":part_name})
lgpl-3.0
pymedusa/SickRage
medusa/providers/torrent/json/eztv.py
1
4401
# coding=utf-8 """Provider code for EZTV.""" from __future__ import unicode_literals import logging from medusa import tv from medusa.helper.common import convert_size from medusa.indexers.utils import mappings from medusa.logger.adapters.style import BraceAdapter from medusa.providers.torrent.torrent_provider import TorrentProvider log = BraceAdapter(logging.getLogger(__name__)) log.logger.addHandler(logging.NullHandler()) class EztvProvider(TorrentProvider): """EZTV Torrent provider.""" def __init__(self): """Initialize the class.""" super(EztvProvider, self).__init__('Eztv') # Credentials self.public = True # URLs self.url = 'https://eztv.io' self.urls = { 'api': 'https://eztv.io/api/get-torrents', } # Proper Strings # Miscellaneous Options # Cache self.cache = tv.Cache(self, min_time=15) def search(self, search_strings, age=0, ep_obj=None, **kwargs): """ Search a provider and parse the results. :param search_strings: A dict with mode (key) and the search value (value) :param age: Not used :param ep_obj: Not used :returns: A list of search results (structure) """ results = [] # Search Params search_params = {} for mode in search_strings: log.debug('Search mode: {0}', mode) for search_string in search_strings[mode]: if mode != 'RSS': imdb_id = self.series.externals.get(mappings[10]) if imdb_id: imdb_id = imdb_id[2:] # strip two tt's of id as they are not used search_params['imdb_id'] = imdb_id log.debug('Search string (IMDb ID): {imdb_id}', {'imdb_id': imdb_id}) else: log.debug('IMDb ID not found') continue search_url = self.urls['api'] data = self.session.get_json(search_url, params=search_params) if not data: log.debug('No data returned from provider') continue results += self.parse(data, mode) return results def parse(self, data, mode): """ Parse search results for items. :param data: The raw response from a search :param mode: The current mode used to search, e.g. 
RSS :return: A list of items found """ items = [] torrent_rows = data.get('torrents', {}) if not torrent_rows: log.debug('Data returned from provider does not contain any torrents') return items for row in torrent_rows: try: title = row.pop('title', None) download_url = row.pop('torrent_url', None) if not all([title, download_url]): continue seeders = row.pop('seeds', 0) leechers = row.pop('peers', 0) # Filter unseeded torrent if seeders < self.minseed: if mode != 'RSS': log.debug("Discarding torrent because it doesn't meet the" ' minimum seeders: {0}. Seeders: {1}', title, seeders) continue torrent_size = row.pop('size_bytes', None) size = convert_size(torrent_size, default=-1) pubdate_raw = row.pop('date_released_unix', None) pubdate = self.parse_pubdate(pubdate_raw, fromtimestamp=True) item = { 'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'pubdate': pubdate, } if mode != 'RSS': log.debug('Found result: {0} with {1} seeders and {2} leechers', title, seeders, leechers) items.append(item) except (AttributeError, TypeError, KeyError, ValueError, IndexError): log.exception('Failed parsing provider.') return items provider = EztvProvider()
gpl-3.0
hilts-vaughan/webrtc-components
client/node_modules/node-gyp/gyp/pylib/gyp/generator/android.py
542
45270
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Notes: # # This generates makefiles suitable for inclusion into the Android build system # via an Android.mk file. It is based on make.py, the standard makefile # generator. # # The code below generates a separate .mk file for each target, but # all are sourced by the top-level GypAndroid.mk. This means that all # variables in .mk-files clobber one another, and furthermore that any # variables set potentially clash with other Android build system variables. # Try to avoid setting global variables where possible. import gyp import gyp.common import gyp.generator.make as make # Reuse global functions from make backend. import os import re import subprocess generator_default_variables = { 'OS': 'android', 'EXECUTABLE_PREFIX': '', 'EXECUTABLE_SUFFIX': '', 'STATIC_LIB_PREFIX': 'lib', 'SHARED_LIB_PREFIX': 'lib', 'STATIC_LIB_SUFFIX': '.a', 'SHARED_LIB_SUFFIX': '.so', 'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)', 'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)', 'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)', 'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)', 'LIB_DIR': '$(obj).$(TOOLSET)', 'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python. 'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python. 'RULE_INPUT_PATH': '$(RULE_SOURCES)', 'RULE_INPUT_EXT': '$(suffix $<)', 'RULE_INPUT_NAME': '$(notdir $<)', 'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)', } # Make supports multiple toolsets generator_supports_multiple_toolsets = True # Generator-specific gyp specs. generator_additional_non_configuration_keys = [ # Boolean to declare that this target does not want its name mangled. 'android_unmangled_name', # Map of android build system variables to set. 
'aosp_build_settings', ] generator_additional_path_sections = [] generator_extra_sources_for_rules = [] ALL_MODULES_FOOTER = """\ # "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from # all the included sub-makefiles. This is just here to clarify. gyp_all_modules: """ header = """\ # This file is generated by gyp; do not edit. """ # Map gyp target types to Android module classes. MODULE_CLASSES = { 'static_library': 'STATIC_LIBRARIES', 'shared_library': 'SHARED_LIBRARIES', 'executable': 'EXECUTABLES', } def IsCPPExtension(ext): return make.COMPILABLE_EXTENSIONS.get(ext) == 'cxx' def Sourceify(path): """Convert a path to its source directory form. The Android backend does not support options.generator_output, so this function is a noop.""" return path # Map from qualified target to path to output. # For Android, the target of these maps is a tuple ('static', 'modulename'), # ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string, # since we link by module. target_outputs = {} # Map from qualified target to any linkable output. A subset # of target_outputs. E.g. when mybinary depends on liba, we want to # include liba in the linker line; when otherbinary depends on # mybinary, we just want to build mybinary first. target_link_deps = {} class AndroidMkWriter(object): """AndroidMkWriter packages up the writing of one target-specific Android.mk. Its only real entry point is Write(), and is mostly used for namespacing. """ def __init__(self, android_top_dir): self.android_top_dir = android_top_dir def Write(self, qualified_target, relative_target, base_path, output_filename, spec, configs, part_of_all, write_alias_target, sdk_version): """The main entry point: writes a .mk file for a single target. 
Arguments: qualified_target: target we're generating relative_target: qualified target name relative to the root base_path: path relative to source root we're building in, used to resolve target-relative paths output_filename: output .mk file name to write spec, configs: gyp info part_of_all: flag indicating this target is part of 'all' write_alias_target: flag indicating whether to create short aliases for this target sdk_version: what to emit for LOCAL_SDK_VERSION in output """ gyp.common.EnsureDirExists(output_filename) self.fp = open(output_filename, 'w') self.fp.write(header) self.qualified_target = qualified_target self.relative_target = relative_target self.path = base_path self.target = spec['target_name'] self.type = spec['type'] self.toolset = spec['toolset'] deps, link_deps = self.ComputeDeps(spec) # Some of the generation below can add extra output, sources, or # link dependencies. All of the out params of the functions that # follow use names like extra_foo. extra_outputs = [] extra_sources = [] self.android_class = MODULE_CLASSES.get(self.type, 'GYP') self.android_module = self.ComputeAndroidModule(spec) (self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec) self.output = self.output_binary = self.ComputeOutput(spec) # Standard header. self.WriteLn('include $(CLEAR_VARS)\n') # Module class and name. self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class) self.WriteLn('LOCAL_MODULE := ' + self.android_module) # Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE. # The library module classes fail if the stem is set. ComputeOutputParts # makes sure that stem == modulename in these cases. 
if self.android_stem != self.android_module: self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem) self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix) if self.toolset == 'host': self.WriteLn('LOCAL_IS_HOST_MODULE := true') self.WriteLn('LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)') else: self.WriteLn('LOCAL_MODULE_TARGET_ARCH := ' '$(TARGET_$(GYP_VAR_PREFIX)ARCH)') self.WriteLn('LOCAL_SDK_VERSION := %s' % sdk_version) # Grab output directories; needed for Actions and Rules. if self.toolset == 'host': self.WriteLn('gyp_intermediate_dir := ' '$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))') else: self.WriteLn('gyp_intermediate_dir := ' '$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))') self.WriteLn('gyp_shared_intermediate_dir := ' '$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))') self.WriteLn() # List files this target depends on so that actions/rules/copies/sources # can depend on the list. # TODO: doesn't pull in things through transitive link deps; needed? target_dependencies = [x[1] for x in deps if x[0] == 'path'] self.WriteLn('# Make sure our deps are built first.') self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES', local_pathify=True) # Actions must come first, since they can generate more OBJs for use below. if 'actions' in spec: self.WriteActions(spec['actions'], extra_sources, extra_outputs) # Rules must be early like actions. if 'rules' in spec: self.WriteRules(spec['rules'], extra_sources, extra_outputs) if 'copies' in spec: self.WriteCopies(spec['copies'], extra_outputs) # GYP generated outputs. self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True) # Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend # on both our dependency targets and our generated files. self.WriteLn('# Make sure our deps and generated files are built first.') self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) ' '$(GYP_GENERATED_OUTPUTS)') self.WriteLn() # Sources. 
if spec.get('sources', []) or extra_sources: self.WriteSources(spec, configs, extra_sources) self.WriteTarget(spec, configs, deps, link_deps, part_of_all, write_alias_target) # Update global list of target outputs, used in dependency tracking. target_outputs[qualified_target] = ('path', self.output_binary) # Update global list of link dependencies. if self.type == 'static_library': target_link_deps[qualified_target] = ('static', self.android_module) elif self.type == 'shared_library': target_link_deps[qualified_target] = ('shared', self.android_module) self.fp.close() return self.android_module def WriteActions(self, actions, extra_sources, extra_outputs): """Write Makefile code for any 'actions' from the gyp input. extra_sources: a list that will be filled in with newly generated source files, if any extra_outputs: a list that will be filled in with any outputs of these actions (used to make other pieces dependent on these actions) """ for action in actions: name = make.StringToMakefileVariable('%s_%s' % (self.relative_target, action['action_name'])) self.WriteLn('### Rules for action "%s":' % action['action_name']) inputs = action['inputs'] outputs = action['outputs'] # Build up a list of outputs. # Collect the output dirs we'll need. dirs = set() for out in outputs: if not out.startswith('$'): print ('WARNING: Action for target "%s" writes output to local path ' '"%s".' % (self.target, out)) dir = os.path.split(out)[0] if dir: dirs.add(dir) if int(action.get('process_outputs_as_sources', False)): extra_sources += outputs # Prepare the actual command. 
command = gyp.common.EncodePOSIXShellList(action['action']) if 'message' in action: quiet_cmd = 'Gyp action: %s ($@)' % action['message'] else: quiet_cmd = 'Gyp action: %s ($@)' % name if len(dirs) > 0: command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command cd_action = 'cd $(gyp_local_path)/%s; ' % self.path command = cd_action + command # The makefile rules are all relative to the top dir, but the gyp actions # are defined relative to their containing dir. This replaces the gyp_* # variables for the action rule with an absolute version so that the # output goes in the right place. # Only write the gyp_* rules for the "primary" output (:1); # it's superfluous for the "extra outputs", and this avoids accidentally # writing duplicate dummy rules for those outputs. main_output = make.QuoteSpaces(self.LocalPathify(outputs[0])) self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output) self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output) self.WriteLn('%s: gyp_intermediate_dir := ' '$(abspath $(gyp_intermediate_dir))' % main_output) self.WriteLn('%s: gyp_shared_intermediate_dir := ' '$(abspath $(gyp_shared_intermediate_dir))' % main_output) # Android's envsetup.sh adds a number of directories to the path including # the built host binary directory. This causes actions/rules invoked by # gyp to sometimes use these instead of system versions, e.g. bison. # The built host binaries may not be suitable, and can cause errors. # So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable # set by envsetup. self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output) # Don't allow spaces in input/output filenames, but make an exception for # filenames which start with '$(' since it's okay for there to be spaces # inside of make function/macro invocations. 
for input in inputs: if not input.startswith('$(') and ' ' in input: raise gyp.common.GypError( 'Action input filename "%s" in target %s contains a space' % (input, self.target)) for output in outputs: if not output.startswith('$(') and ' ' in output: raise gyp.common.GypError( 'Action output filename "%s" in target %s contains a space' % (output, self.target)) self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' % (main_output, ' '.join(map(self.LocalPathify, inputs)))) self.WriteLn('\t@echo "%s"' % quiet_cmd) self.WriteLn('\t$(hide)%s\n' % command) for output in outputs[1:]: # Make each output depend on the main output, with an empty command # to force make to notice that the mtime has changed. self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output)) extra_outputs += outputs self.WriteLn() self.WriteLn() def WriteRules(self, rules, extra_sources, extra_outputs): """Write Makefile code for any 'rules' from the gyp input. extra_sources: a list that will be filled in with newly generated source files, if any extra_outputs: a list that will be filled in with any outputs of these rules (used to make other pieces dependent on these rules) """ if len(rules) == 0: return for rule in rules: if len(rule.get('rule_sources', [])) == 0: continue name = make.StringToMakefileVariable('%s_%s' % (self.relative_target, rule['rule_name'])) self.WriteLn('\n### Generated for rule "%s":' % name) self.WriteLn('# "%s":' % rule) inputs = rule.get('inputs') for rule_source in rule.get('rule_sources', []): (rule_source_dirname, rule_source_basename) = os.path.split(rule_source) (rule_source_root, rule_source_ext) = \ os.path.splitext(rule_source_basename) outputs = [self.ExpandInputRoot(out, rule_source_root, rule_source_dirname) for out in rule['outputs']] dirs = set() for out in outputs: if not out.startswith('$'): print ('WARNING: Rule for target %s writes output to local path %s' % (self.target, out)) dir = os.path.dirname(out) if dir: dirs.add(dir) extra_outputs += outputs if 
int(rule.get('process_outputs_as_sources', False)): extra_sources.extend(outputs) components = [] for component in rule['action']: component = self.ExpandInputRoot(component, rule_source_root, rule_source_dirname) if '$(RULE_SOURCES)' in component: component = component.replace('$(RULE_SOURCES)', rule_source) components.append(component) command = gyp.common.EncodePOSIXShellList(components) cd_action = 'cd $(gyp_local_path)/%s; ' % self.path command = cd_action + command if dirs: command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command # We set up a rule to build the first output, and then set up # a rule for each additional output to depend on the first. outputs = map(self.LocalPathify, outputs) main_output = outputs[0] self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output) self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output) self.WriteLn('%s: gyp_intermediate_dir := ' '$(abspath $(gyp_intermediate_dir))' % main_output) self.WriteLn('%s: gyp_shared_intermediate_dir := ' '$(abspath $(gyp_shared_intermediate_dir))' % main_output) # See explanation in WriteActions. self.WriteLn('%s: export PATH := ' '$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output) main_output_deps = self.LocalPathify(rule_source) if inputs: main_output_deps += ' ' main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs]) self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' % (main_output, main_output_deps)) self.WriteLn('\t%s\n' % command) for output in outputs[1:]: # Make each output depend on the main output, with an empty command # to force make to notice that the mtime has changed. self.WriteLn('%s: %s ;' % (output, main_output)) self.WriteLn() self.WriteLn() def WriteCopies(self, copies, extra_outputs): """Write Makefile code for any 'copies' from the gyp input. 
extra_outputs: a list that will be filled in with any outputs of this action (used to make other pieces dependent on this action) """ self.WriteLn('### Generated for copy rule.') variable = make.StringToMakefileVariable(self.relative_target + '_copies') outputs = [] for copy in copies: for path in copy['files']: # The Android build system does not allow generation of files into the # source tree. The destination should start with a variable, which will # typically be $(gyp_intermediate_dir) or # $(gyp_shared_intermediate_dir). Note that we can't use an assertion # because some of the gyp tests depend on this. if not copy['destination'].startswith('$'): print ('WARNING: Copy rule for target %s writes output to ' 'local path %s' % (self.target, copy['destination'])) # LocalPathify() calls normpath, stripping trailing slashes. path = Sourceify(self.LocalPathify(path)) filename = os.path.split(path)[1] output = Sourceify(self.LocalPathify(os.path.join(copy['destination'], filename))) self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' % (output, path)) self.WriteLn('\t@echo Copying: $@') self.WriteLn('\t$(hide) mkdir -p $(dir $@)') self.WriteLn('\t$(hide) $(ACP) -rpf $< $@') self.WriteLn() outputs.append(output) self.WriteLn('%s = %s' % (variable, ' '.join(map(make.QuoteSpaces, outputs)))) extra_outputs.append('$(%s)' % variable) self.WriteLn() def WriteSourceFlags(self, spec, configs): """Write out the flags and include paths used to compile source files for the current target. Args: spec, configs: input from gyp. 
""" for configname, config in sorted(configs.iteritems()): extracted_includes = [] self.WriteLn('\n# Flags passed to both C and C++ files.') cflags, includes_from_cflags = self.ExtractIncludesFromCFlags( config.get('cflags', []) + config.get('cflags_c', [])) extracted_includes.extend(includes_from_cflags) self.WriteList(cflags, 'MY_CFLAGS_%s' % configname) self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname, prefix='-D', quoter=make.EscapeCppDefine) self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS') includes = list(config.get('include_dirs', [])) includes.extend(extracted_includes) includes = map(Sourceify, map(self.LocalPathify, includes)) includes = self.NormalizeIncludePaths(includes) self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname) self.WriteLn('\n# Flags passed to only C++ (and not C) files.') self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname) self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) ' '$(MY_DEFS_$(GYP_CONFIGURATION))') # Undefine ANDROID for host modules # TODO: the source code should not use macro ANDROID to tell if it's host # or target module. if self.toolset == 'host': self.WriteLn('# Undefine ANDROID for host modules') self.WriteLn('LOCAL_CFLAGS += -UANDROID') self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) ' '$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))') self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))') # Android uses separate flags for assembly file invocations, but gyp expects # the same CFLAGS to be applied: self.WriteLn('LOCAL_ASFLAGS := $(LOCAL_CFLAGS)') def WriteSources(self, spec, configs, extra_sources): """Write Makefile code for any 'sources' from the gyp input. These are source files necessary to build the current target. We need to handle shared_intermediate directory source files as a special case by copying them to the intermediate directory and treating them as a genereated sources. 
Otherwise the Android build rules won't pick them up. Args: spec, configs: input from gyp. extra_sources: Sources generated from Actions or Rules. """ sources = filter(make.Compilable, spec.get('sources', [])) generated_not_sources = [x for x in extra_sources if not make.Compilable(x)] extra_sources = filter(make.Compilable, extra_sources) # Determine and output the C++ extension used by these sources. # We simply find the first C++ file and use that extension. all_sources = sources + extra_sources local_cpp_extension = '.cpp' for source in all_sources: (root, ext) = os.path.splitext(source) if IsCPPExtension(ext): local_cpp_extension = ext break if local_cpp_extension != '.cpp': self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension) # We need to move any non-generated sources that are coming from the # shared intermediate directory out of LOCAL_SRC_FILES and put them # into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files # that don't match our local_cpp_extension, since Android will only # generate Makefile rules for a single LOCAL_CPP_EXTENSION. local_files = [] for source in sources: (root, ext) = os.path.splitext(source) if '$(gyp_shared_intermediate_dir)' in source: extra_sources.append(source) elif '$(gyp_intermediate_dir)' in source: extra_sources.append(source) elif IsCPPExtension(ext) and ext != local_cpp_extension: extra_sources.append(source) else: local_files.append(os.path.normpath(os.path.join(self.path, source))) # For any generated source, if it is coming from the shared intermediate # directory then we add a Make rule to copy them to the local intermediate # directory first. This is because the Android LOCAL_GENERATED_SOURCES # must be in the local module intermediate directory for the compile rules # to work properly. If the file has the wrong C++ extension, then we add # a rule to copy that to intermediates and use the new version. 
final_generated_sources = [] # If a source file gets copied, we still need to add the orginal source # directory as header search path, for GCC searches headers in the # directory that contains the source file by default. origin_src_dirs = [] for source in extra_sources: local_file = source if not '$(gyp_intermediate_dir)/' in local_file: basename = os.path.basename(local_file) local_file = '$(gyp_intermediate_dir)/' + basename (root, ext) = os.path.splitext(local_file) if IsCPPExtension(ext) and ext != local_cpp_extension: local_file = root + local_cpp_extension if local_file != source: self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source))) self.WriteLn('\tmkdir -p $(@D); cp $< $@') origin_src_dirs.append(os.path.dirname(source)) final_generated_sources.append(local_file) # We add back in all of the non-compilable stuff to make sure that the # make rules have dependencies on them. final_generated_sources.extend(generated_not_sources) self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES') origin_src_dirs = gyp.common.uniquer(origin_src_dirs) origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs)) self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS') self.WriteList(local_files, 'LOCAL_SRC_FILES') # Write out the flags used to compile the source; this must be done last # so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path. self.WriteSourceFlags(spec, configs) def ComputeAndroidModule(self, spec): """Return the Android module name used for a gyp spec. We use the complete qualified target name to avoid collisions between duplicate targets in different directories. We also add a suffix to distinguish gyp-generated module names. 
""" if int(spec.get('android_unmangled_name', 0)): assert self.type != 'shared_library' or self.target.startswith('lib') return self.target if self.type == 'shared_library': # For reasons of convention, the Android build system requires that all # shared library modules are named 'libfoo' when generating -l flags. prefix = 'lib_' else: prefix = '' if spec['toolset'] == 'host': suffix = '_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp' else: suffix = '_gyp' if self.path: middle = make.StringToMakefileVariable('%s_%s' % (self.path, self.target)) else: middle = make.StringToMakefileVariable(self.target) return ''.join([prefix, middle, suffix]) def ComputeOutputParts(self, spec): """Return the 'output basename' of a gyp spec, split into filename + ext. Android libraries must be named the same thing as their module name, otherwise the linker can't find them, so product_name and so on must be ignored if we are building a library, and the "lib" prepending is not done for Android. """ assert self.type != 'loadable_module' # TODO: not supported? target = spec['target_name'] target_prefix = '' target_ext = '' if self.type == 'static_library': target = self.ComputeAndroidModule(spec) target_ext = '.a' elif self.type == 'shared_library': target = self.ComputeAndroidModule(spec) target_ext = '.so' elif self.type == 'none': target_ext = '.stamp' elif self.type != 'executable': print ("ERROR: What output file should be generated?", "type", self.type, "target", target) if self.type != 'static_library' and self.type != 'shared_library': target_prefix = spec.get('product_prefix', target_prefix) target = spec.get('product_name', target) product_ext = spec.get('product_extension') if product_ext: target_ext = '.' + product_ext target_stem = target_prefix + target return (target_stem, target_ext) def ComputeOutputBasename(self, spec): """Return the 'output basename' of a gyp spec. 
E.g., the loadable module 'foobar' in directory 'baz' will produce 'libfoobar.so' """ return ''.join(self.ComputeOutputParts(spec)) def ComputeOutput(self, spec): """Return the 'output' (full output path) of a gyp spec. E.g., the loadable module 'foobar' in directory 'baz' will produce '$(obj)/baz/libfoobar.so' """ if self.type == 'executable': # We install host executables into shared_intermediate_dir so they can be # run by gyp rules that refer to PRODUCT_DIR. path = '$(gyp_shared_intermediate_dir)' elif self.type == 'shared_library': if self.toolset == 'host': path = '$($(GYP_HOST_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES)' else: path = '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)' else: # Other targets just get built into their intermediate dir. if self.toolset == 'host': path = ('$(call intermediates-dir-for,%s,%s,true,,' '$(GYP_HOST_VAR_PREFIX))' % (self.android_class, self.android_module)) else: path = ('$(call intermediates-dir-for,%s,%s,,,$(GYP_VAR_PREFIX))' % (self.android_class, self.android_module)) assert spec.get('product_dir') is None # TODO: not supported? return os.path.join(path, self.ComputeOutputBasename(spec)) def NormalizeIncludePaths(self, include_paths): """ Normalize include_paths. Convert absolute paths to relative to the Android top directory. Args: include_paths: A list of unprocessed include paths. Returns: A list of normalized include paths. """ normalized = [] for path in include_paths: if path[0] == '/': path = gyp.common.RelativePath(path, self.android_top_dir) normalized.append(path) return normalized def ExtractIncludesFromCFlags(self, cflags): """Extract includes "-I..." out from cflags Args: cflags: A list of compiler flags, which may be mixed with "-I.." Returns: A tuple of lists: (clean_clfags, include_paths). "-I.." is trimmed. 
""" clean_cflags = [] include_paths = [] for flag in cflags: if flag.startswith('-I'): include_paths.append(flag[2:]) else: clean_cflags.append(flag) return (clean_cflags, include_paths) def FilterLibraries(self, libraries): """Filter the 'libraries' key to separate things that shouldn't be ldflags. Library entries that look like filenames should be converted to android module names instead of being passed to the linker as flags. Args: libraries: the value of spec.get('libraries') Returns: A tuple (static_lib_modules, dynamic_lib_modules, ldflags) """ static_lib_modules = [] dynamic_lib_modules = [] ldflags = [] for libs in libraries: # Libs can have multiple words. for lib in libs.split(): # Filter the system libraries, which are added by default by the Android # build system. if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or lib.endswith('libgcc.a')): continue match = re.search(r'([^/]+)\.a$', lib) if match: static_lib_modules.append(match.group(1)) continue match = re.search(r'([^/]+)\.so$', lib) if match: dynamic_lib_modules.append(match.group(1)) continue if lib.startswith('-l'): ldflags.append(lib) return (static_lib_modules, dynamic_lib_modules, ldflags) def ComputeDeps(self, spec): """Compute the dependencies of a gyp spec. Returns a tuple (deps, link_deps), where each is a list of filenames that will need to be put in front of make for either building (deps) or linking (link_deps). """ deps = [] link_deps = [] if 'dependencies' in spec: deps.extend([target_outputs[dep] for dep in spec['dependencies'] if target_outputs[dep]]) for dep in spec['dependencies']: if dep in target_link_deps: link_deps.append(target_link_deps[dep]) deps.extend(link_deps) return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps)) def WriteTargetFlags(self, spec, configs, link_deps): """Write Makefile code to specify the link flags and library dependencies. spec, configs: input from gyp. link_deps: link dependency list; see ComputeDeps() """ # Libraries (i.e. 
-lfoo) # These must be included even for static libraries as some of them provide # implicit include paths through the build system. libraries = gyp.common.uniquer(spec.get('libraries', [])) static_libs, dynamic_libs, ldflags_libs = self.FilterLibraries(libraries) if self.type != 'static_library': for configname, config in sorted(configs.iteritems()): ldflags = list(config.get('ldflags', [])) self.WriteLn('') self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname) self.WriteList(ldflags_libs, 'LOCAL_GYP_LIBS') self.WriteLn('LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION)) ' '$(LOCAL_GYP_LIBS)') # Link dependencies (i.e. other gyp targets this target depends on) # These need not be included for static libraries as within the gyp build # we do not use the implicit include path mechanism. if self.type != 'static_library': static_link_deps = [x[1] for x in link_deps if x[0] == 'static'] shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared'] else: static_link_deps = [] shared_link_deps = [] # Only write the lists if they are non-empty. if static_libs or static_link_deps: self.WriteLn('') self.WriteList(static_libs + static_link_deps, 'LOCAL_STATIC_LIBRARIES') self.WriteLn('# Enable grouping to fix circular references') self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true') if dynamic_libs or shared_link_deps: self.WriteLn('') self.WriteList(dynamic_libs + shared_link_deps, 'LOCAL_SHARED_LIBRARIES') def WriteTarget(self, spec, configs, deps, link_deps, part_of_all, write_alias_target): """Write Makefile code to produce the final target of the gyp spec. spec, configs: input from gyp. 
deps, link_deps: dependency lists; see ComputeDeps() part_of_all: flag indicating this target is part of 'all' write_alias_target: flag indicating whether to create short aliases for this target """ self.WriteLn('### Rules for final target.') if self.type != 'none': self.WriteTargetFlags(spec, configs, link_deps) settings = spec.get('aosp_build_settings', {}) if settings: self.WriteLn('### Set directly by aosp_build_settings.') for k, v in settings.iteritems(): if isinstance(v, list): self.WriteList(v, k) else: self.WriteLn('%s := %s' % (k, make.QuoteIfNecessary(v))) self.WriteLn('') # Add to the set of targets which represent the gyp 'all' target. We use the # name 'gyp_all_modules' as the Android build system doesn't allow the use # of the Make target 'all' and because 'all_modules' is the equivalent of # the Make target 'all' on Android. if part_of_all and write_alias_target: self.WriteLn('# Add target alias to "gyp_all_modules" target.') self.WriteLn('.PHONY: gyp_all_modules') self.WriteLn('gyp_all_modules: %s' % self.android_module) self.WriteLn('') # Add an alias from the gyp target name to the Android module name. This # simplifies manual builds of the target, and is required by the test # framework. if self.target != self.android_module and write_alias_target: self.WriteLn('# Alias gyp target name.') self.WriteLn('.PHONY: %s' % self.target) self.WriteLn('%s: %s' % (self.target, self.android_module)) self.WriteLn('') # Add the command to trigger build of the target type depending # on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY # NOTE: This has to come last! 
modifier = '' if self.toolset == 'host': modifier = 'HOST_' if self.type == 'static_library': self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier) elif self.type == 'shared_library': self.WriteLn('LOCAL_PRELINK_MODULE := false') self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier) elif self.type == 'executable': # Executables are for build and test purposes only, so they're installed # to a directory that doesn't get included in the system image. self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)') self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier) else: self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp') self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true') if self.toolset == 'target': self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)') else: self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)') self.WriteLn() self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk') self.WriteLn() self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)') self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"') self.WriteLn('\t$(hide) mkdir -p $(dir $@)') self.WriteLn('\t$(hide) touch $@') self.WriteLn() self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX :=') def WriteList(self, value_list, variable=None, prefix='', quoter=make.QuoteIfNecessary, local_pathify=False): """Write a variable definition that is a list of values. E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out foo = blaha blahb but in a pretty-printed style. """ values = '' if value_list: value_list = [quoter(prefix + l) for l in value_list] if local_pathify: value_list = [self.LocalPathify(l) for l in value_list] values = ' \\\n\t' + ' \\\n\t'.join(value_list) self.fp.write('%s :=%s\n\n' % (variable, values)) def WriteLn(self, text=''): self.fp.write(text + '\n') def LocalPathify(self, path): """Convert a subdirectory-relative path into a normalized path which starts with the make variable $(LOCAL_PATH) (i.e. the top of the project tree). 
Absolute paths, or paths that contain variables, are just normalized.""" if '$(' in path or os.path.isabs(path): # path is not a file in the project tree in this case, but calling # normpath is still important for trimming trailing slashes. return os.path.normpath(path) local_path = os.path.join('$(LOCAL_PATH)', self.path, path) local_path = os.path.normpath(local_path) # Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH) # - i.e. that the resulting path is still inside the project tree. The # path may legitimately have ended up containing just $(LOCAL_PATH), though, # so we don't look for a slash. assert local_path.startswith('$(LOCAL_PATH)'), ( 'Path %s attempts to escape from gyp path %s !)' % (path, self.path)) return local_path def ExpandInputRoot(self, template, expansion, dirname): if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template: return template path = template % { 'INPUT_ROOT': expansion, 'INPUT_DIRNAME': dirname, } return os.path.normpath(path) def PerformBuild(data, configurations, params): # The android backend only supports the default configuration. options = params['options'] makefile = os.path.abspath(os.path.join(options.toplevel_dir, 'GypAndroid.mk')) env = dict(os.environ) env['ONE_SHOT_MAKEFILE'] = makefile arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'], 'gyp_all_modules'] print 'Building: %s' % arguments subprocess.check_call(arguments, env=env) def GenerateOutput(target_list, target_dicts, data, params): options = params['options'] generator_flags = params.get('generator_flags', {}) builddir_name = generator_flags.get('output_dir', 'out') limit_to_target_all = generator_flags.get('limit_to_target_all', False) write_alias_targets = generator_flags.get('write_alias_targets', True) sdk_version = generator_flags.get('aosp_sdk_version', 19) android_top_dir = os.environ.get('ANDROID_BUILD_TOP') assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.' 
def CalculateMakefilePath(build_file, base_name): """Determine where to write a Makefile for a given gyp file.""" # Paths in gyp files are relative to the .gyp file, but we want # paths relative to the source root for the master makefile. Grab # the path of the .gyp file as the base to relativize against. # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp". base_path = gyp.common.RelativePath(os.path.dirname(build_file), options.depth) # We write the file in the base_path directory. output_file = os.path.join(options.depth, base_path, base_name) assert not options.generator_output, ( 'The Android backend does not support options.generator_output.') base_path = gyp.common.RelativePath(os.path.dirname(build_file), options.toplevel_dir) return base_path, output_file # TODO: search for the first non-'Default' target. This can go # away when we add verification that all targets have the # necessary configurations. default_configuration = None toolsets = set([target_dicts[target]['toolset'] for target in target_list]) for target in target_list: spec = target_dicts[target] if spec['default_configuration'] != 'Default': default_configuration = spec['default_configuration'] break if not default_configuration: default_configuration = 'Default' srcdir = '.' makefile_name = 'GypAndroid' + options.suffix + '.mk' makefile_path = os.path.join(options.toplevel_dir, makefile_name) assert not options.generator_output, ( 'The Android backend does not support options.generator_output.') gyp.common.EnsureDirExists(makefile_path) root_makefile = open(makefile_path, 'w') root_makefile.write(header) # We set LOCAL_PATH just once, here, to the top of the project tree. This # allows all the other paths we use to be relative to the Android.mk file, # as the Android build system expects. root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n') # Find the list of targets that derive from the gyp file(s) being built. 
needed_targets = set() for build_file in params['build_files']: for target in gyp.common.AllTargets(target_list, target_dicts, build_file): needed_targets.add(target) build_files = set() include_list = set() android_modules = {} for qualified_target in target_list: build_file, target, toolset = gyp.common.ParseQualifiedTarget( qualified_target) relative_build_file = gyp.common.RelativePath(build_file, options.toplevel_dir) build_files.add(relative_build_file) included_files = data[build_file]['included_files'] for included_file in included_files: # The included_files entries are relative to the dir of the build file # that included them, so we have to undo that and then make them relative # to the root dir. relative_include_file = gyp.common.RelativePath( gyp.common.UnrelativePath(included_file, build_file), options.toplevel_dir) abs_include_file = os.path.abspath(relative_include_file) # If the include file is from the ~/.gyp dir, we should use absolute path # so that relocating the src dir doesn't break the path. if (params['home_dot_gyp'] and abs_include_file.startswith(params['home_dot_gyp'])): build_files.add(abs_include_file) else: build_files.add(relative_include_file) base_path, output_file = CalculateMakefilePath(build_file, target + '.' + toolset + options.suffix + '.mk') spec = target_dicts[qualified_target] configs = spec['configurations'] part_of_all = qualified_target in needed_targets if limit_to_target_all and not part_of_all: continue relative_target = gyp.common.QualifiedTarget(relative_build_file, target, toolset) writer = AndroidMkWriter(android_top_dir) android_module = writer.Write(qualified_target, relative_target, base_path, output_file, spec, configs, part_of_all=part_of_all, write_alias_target=write_alias_targets, sdk_version=sdk_version) if android_module in android_modules: print ('ERROR: Android module names must be unique. 
The following ' 'targets both generate Android module name %s.\n %s\n %s' % (android_module, android_modules[android_module], qualified_target)) return android_modules[android_module] = qualified_target # Our root_makefile lives at the source root. Compute the relative path # from there to the output_file for including. mkfile_rel_path = gyp.common.RelativePath(output_file, os.path.dirname(makefile_path)) include_list.add(mkfile_rel_path) root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration) root_makefile.write('GYP_VAR_PREFIX ?=\n') root_makefile.write('GYP_HOST_VAR_PREFIX ?=\n') root_makefile.write('GYP_HOST_MULTILIB ?=\n') # Write out the sorted list of includes. root_makefile.write('\n') for include_file in sorted(include_list): root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n') root_makefile.write('\n') if write_alias_targets: root_makefile.write(ALL_MODULES_FOOTER) root_makefile.close()
mit
CVML/cvxpy
examples/qcqp.py
12
2247
# for decimal division from __future__ import division import sys import cvxopt import numpy as np from pylab import * import math from cvxpy import * # Taken from CVX website http://cvxr.com/cvx/examples/ # Derived from Example: Finding the fastest mixing Markov chain on a graph # Ported from cvx matlab to cvxpy by Misrab Faizullah-Khan # Original comments below # Boyd & Vandenberghe, "Convex Optimization" # Joelle Skaf - 08/23/05 # # Solved a QCQP with 3 inequalities: # minimize 1/2 x'*P0*x + q0'*r + r0 # s.t. 1/2 x'*Pi*x + qi'*r + ri <= 0 for i=1,2,3 # and verifies that strong duality holds. # Input data n = 6 eps = sys.float_info.epsilon P0 = cvxopt.normal(n, n) eye = cvxopt.spmatrix(1.0, range(n), range(n)) P0 = P0.T * P0 + eps * eye print P0 P1 = cvxopt.normal(n, n) P1 = P1.T*P1 P2 = cvxopt.normal(n, n) P2 = P2.T*P2 P3 = cvxopt.normal(n, n) P3 = P3.T*P3 q0 = cvxopt.normal(n, 1) q1 = cvxopt.normal(n, 1) q2 = cvxopt.normal(n, 1) q3 = cvxopt.normal(n, 1) r0 = cvxopt.normal(1, 1) r1 = cvxopt.normal(1, 1) r2 = cvxopt.normal(1, 1) r3 = cvxopt.normal(1, 1) # Form the problem x = Variable(n) objective = Minimize( 0.5*quad_form(x,P0) + q0.T*x + r0 ) constraints = [ 0.5*quad_form(x,P1) + q1.T*x + r1 <= 0, 0.5*quad_form(x,P2) + q2.T*x + r2 <= 0, 0.5*quad_form(x,P3) + q3.T*x + r3 <= 0 ] # We now find the primal result and compare it to the dual result # to check if strong duality holds i.e. 
the duality gap is effectively zero p = Problem(objective, constraints) primal_result = p.solve() if p.status is OPTIMAL: # Note that since our data is random, we may need to run this program multiple times to get a feasible primal # When feasible, we can print out the following values print x.value # solution lam1 = constraints[0].dual_value lam2 = constraints[1].dual_value lam3 = constraints[2].dual_value P_lam = P0 + lam1*P1 + lam2*P2 + lam3*P3 q_lam = q0 + lam1*q1 + lam2*q2 + lam3*q3 r_lam = r0 + lam1*r1 + lam2*r2 + lam3*r3 dual_result = -0.5*q_lam.T*P_lam*q_lam + r_lam # ISSUE: dual result is matrix for some reason print 'Our duality gap is:' print (primal_result - dual_result)
gpl-3.0
snegovick/dswarm_simulator
localization_2d.py
1
12097
#!/usr/bin/env python
"""2D swarm-robot localization simulator.

Renders a rectangular arena with obstacles and differential-drive robots
using PyGTK/Cairo, simulates noisy wheel odometry and 8-beam range
finders (geometry queries delegated to the collision_detection module),
and wires each robot into the simulated radio / ceiling-camera services
from the `extra` package.

Passing "w" as the first command-line argument additionally dumps every
rendered frame to /tmp/NNNNNN.png.
"""
import random
import math
import collision_detection as cd
from extra import ceiling_camera, radio
import sys

# "w" on the command line enables per-frame PNG dumps (see Screen.do_expose_event).
write_pngs = False
if len(sys.argv)>=2:
    if sys.argv[1] == "w":
        write_pngs = True

# Arena geometry and rendering constants.  Distances are millimetres;
# pixel_per_mm converts them to pixels (1.0 here, so mm == px).
bpp = 4
polygon_width = 1000
polygon_height = 500
pixel_per_mm = 1.
obstacle_color = (0,0,0)
obstacle_compare_color = (0,0,0)
no_obstacle_compare_color = (255, 255, 255)

# Drawing-surface size in pixels.
width = int(polygon_width/pixel_per_mm)
height = int(polygon_height/pixel_per_mm)

import pygtk
pygtk.require('2.0')
import gtk, gobject, cairo

# Global draw list, populated by the caller: objects2d[0] is expected to be
# the arena polygon followed by obstacles, objects2d[1] the robots
# (see Screen.radio_init / Screen.do_expose_event).
objects2d = []


class obj2d(object):
    """Base class for drawable, clickable 2D scene objects."""

    def __init__(self):
        self.x = None
        self.y = None
        self.theta = None

    def check_if_point_belongs(self, x, y):
        """Hit test in arena-centred coordinates; subclasses override."""
        return False

    def click_handler(self, event):
        pass

    def draw(self, cairo):
        pass


class polygon (obj2d, cd.aabb):
    """Rectangular arena with walls of thickness `lw`.

    top/bottom/left/right describe the inner wall rectangle in
    arena-centred coordinates -- presumably the cd.aabb fields consumed by
    collision_detection; TODO confirm against that module.
    """

    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.lw = 10           # wall line width (mm)
        self.selected = False
        self.top = -self.height/2. + self.lw
        self.bottom = self.height/2. - self.lw
        self.left = self.lw-self.width/2.
        self.right = self.width - self.lw - self.width/2.

    def draw(self, cairo):
        """Paint a white arena interior and the four black walls."""
        cairo.set_source_rgb(1.0,1.0,1.0)
        cairo.rectangle(0,0,self.width/pixel_per_mm,self.height/pixel_per_mm)
        cairo.fill()
        cairo.set_line_width(self.lw)
        cairo.set_source_rgb(obstacle_color[0], obstacle_color[1], obstacle_color[2])
        cairo.move_to(0,self.lw/2)
        cairo.line_to(self.width/pixel_per_mm,self.lw/2)
        cairo.move_to(self.width/pixel_per_mm-self.lw/2,0)
        cairo.line_to(self.width/pixel_per_mm-self.lw/2,self.height/pixel_per_mm)
        cairo.move_to(self.width/pixel_per_mm,self.height/pixel_per_mm-self.lw/2)
        cairo.line_to(0,self.height/pixel_per_mm-self.lw/2)
        cairo.move_to(self.lw/2,self.height/pixel_per_mm)
        cairo.line_to(self.lw/2,0)
        cairo.stroke()
        cairo.set_line_width(2.0)


class square_obstacle (obj2d):
    """Axis-drawn rectangular obstacle at (x, y) rotated by `angle`.

    NOTE(review): draw() references undefined names `w`/`h` and calls
    set_line_width/set_source_rgb on the `cairo` module instead of the
    `cr` context, and draws with the module-level `width`/`height` rather
    than self.width/self.height -- it would raise NameError/AttributeError
    if invoked.  It appears to be unused dead code; confirm before relying
    on this class.
    """

    def __init__(self, width, height, x, y, angle):
        self.width = width
        self.height = height
        self.angle = angle
        self.x = x
        self.y = y
        self.lw = 10
        self.selected = False

    def check_if_point_belongs(self, x, y):
        # Hit test not implemented for rotated rectangles.
        pass

    def draw(self, cr):
        cr.translate(self.x/pixel_per_mm+w/2., self.y/pixel_per_mm+h/2.)
        cr.rotate(self.angle)
        cairo.set_line_width(self.lw)
        cairo.set_source_rgb(obstacle_color[0], obstacle_color[1], obstacle_color[2])
        # cr.arc(w*self.x+w/2, h*self.y+h/2, self.r, 0.0, 2*math.pi)
        cr.move_to(0,0)
        cr.line_to(width/pixel_per_mm, 0)
        cr.move_to(width/pixel_per_mm,0)
        cr.line_to(width/pixel_per_mm, height/pixel_per_mm)
        cr.move_to(width/pixel_per_mm, height/pixel_per_mm)
        cr.line_to(0, height/pixel_per_mm)
        cr.move_to(0, height/pixel_per_mm)
        cr.line_to(0,0)
        cr.stroke()
        cr.identity_matrix()


class circular_obstacle (obj2d):
    """Circular obstacle of radius `r` centred at (x, y), arena-centred mm."""

    def __init__(self, x, y, r):
        self.x = x
        self.y = y
        self.r = r
        self.lw = 10
        self.selected = False

    def check_if_point_belongs(self, x, y):
        """True when (x, y) lies strictly inside the circle."""
        if (math.sqrt((x-self.x)**2+(y-self.y)**2)<self.r):
            return True
        return False

    def draw(self, cr):
        cr.identity_matrix()
        # Shift from arena-centred coordinates to the surface origin.
        cr.translate(self.x/pixel_per_mm+width/2., self.y/pixel_per_mm+height/2.)
        cr.set_line_width(self.lw)
        cr.set_source_rgb(obstacle_color[0], obstacle_color[1], obstacle_color[2])
        cr.arc(0, 0, self.r, 0.0, 2*math.pi)
        cr.stroke()
        cr.identity_matrix()
        cr.set_line_width(2.0)


def gaussian(mu, sigma, x):
    """Value of the normal pdf N(mu, sigma^2) at x."""
    return math.exp(- ((mu - x) ** 2) / (sigma ** 2) / 2.0) / math.sqrt(2.0 * math.pi * (sigma ** 2))


class proto_robot(obj2d, cd.circle):
    """Differential-drive robot base: pose + 8 simulated range-finder beams.

    `beams` holds beam directions in degrees relative to the robot heading;
    `ranges` holds the latest simulated distance per beam, clipped to
    `range_max`.  `self.polygon` (the arena + obstacle list used for ray
    casting) is not set here -- presumably assigned by external setup code;
    TODO confirm.
    """

    def __init__(self, x, y, theta):
        self.x = x
        self.y = y
        self.selected = False
        self.theta = theta
        self.ranges = [0.0 for i in range(8)]
        self.r = 10.                 # body radius (mm)
        self.wheel_radius = 1.
        self.base = 85.              # wheel base (mm)
        self.wheels_omega = [0.0, 0.0]
        self.dtheta = 0.0
        self.dx = 0.0
        self.dy = 0.0
        self.sense_noise = 10.       # range-sensor noise sigma, used by measurement_prob
        self.beams = [0.0, 15.0, 90-15, 90+15, 180, 180+90-15, 180+90+15, 360-15]
        self.range_max = 100.
        self.wheel_noise = 0.1       # odometry noise sigma per wheel step

    def check_if_point_belongs(self, x, y):
        """True when (x, y) lies inside the robot body circle."""
        if (math.sqrt((x-self.x)**2+(y-self.y)**2)<self.r):
            return True
        return False

    def synchronize(self):
        # Hook invoked after each sensor sweep; subclasses may override.
        pass

    def __repr__(self):
        return "x: "+str(self.x)+", y: "+str(self.y)+", theta: "+str(self.theta)

    def measurement_prob(self, measurements):
        """Likelihood of `measurements` given the current simulated ranges.

        Product of independent Gaussians per beam -- presumably used for
        particle-filter style weighting; TODO confirm against callers.
        """
        prob = 1.0
        for i,r in enumerate(self.ranges):
            prob*=gaussian(r, self.sense_noise, measurements[i])
        return prob

    def sample_rangefinders(self, data):
        """Cast all 8 beams against arena walls and circular obstacles.

        `data` (the rendered pixel buffer) is stored but not used for ray
        casting; distances come from collision_detection geometry instead.
        Each beam takes the minimum of the wall hit (self.polygon[0]) and
        every circular obstacle hit (self.polygon[1:]).
        """
        self.data = data
        w = width
        h = height
        # NOTE(review): the far-field shortcut below is disabled with a
        # hard-coded False, so the else branch always runs.
        if (False):#cd.circle_to_aabb_inverse_plain((self.x, self.y, self.r+self.range_max), self.polygon)[0] == True):
            self.ranges = [self.range_max]*8
        else:
            for i, b in enumerate(self.beams):
                all_ranges = []
                # Beam direction in world coordinates (beam offsets are degrees).
                angle = self.theta+b*math.pi/180.
                all_ranges.append(int(cd.orientedline_to_inverse_aabb_dist(self.x+self.r*math.cos(angle), self.y+self.r*math.sin(angle), angle, self.range_max, self.polygon[0])))
                for o in self.polygon[1:]:
                    all_ranges.append(int(cd.ray_dist_to_circle_plain(o.x, o.y, o.r, self.x+self.r*math.cos(angle), self.y+self.r*math.sin(angle), angle, self.range_max)[1]))
                self.ranges[i] = min(all_ranges)
        self.synchronize()


class robot (proto_robot):
    """Concrete robot with a name, colour, and a simulated radio transciever."""

    def __init__(self, x, y, r, name, color, theta, sense_noise, radio_address):
        super(robot,self).__init__(x, y, theta)
        self.r = r
        self.range_max = 100.
        self.name = name
        self.sense_noise = sense_noise
        self.color = color
        self.ranges = [0.0 for i in range(8)]
        self.draw_beams = True
        self.data = None
        self.transciever = radio.transciever(radio_address)

    def click_handler(self, event):
        pass

    def update_position(self, polygon):
        """Integrate one odometry step with Gaussian wheel noise.

        Converts the two wheel angular rates into a forward displacement
        and heading change; if the new pose collides with the arena walls
        (cd.circle_to_aabb_inverse), the entire step is reverted.
        Returns self.
        """
        # Remember the old pose so a colliding step can be undone.
        ox = self.x
        oy = self.y
        ot = self.theta
        owheels = self.wheels_omega[:]
        # Per-wheel displacement with additive Gaussian noise.
        deltaulr = [om*self.wheel_radius+random.gauss(0.0, self.wheel_noise) for om in self.wheels_omega]
        deltau = (deltaulr[0]+deltaulr[1])/2.0
        self.dtheta = (deltaulr[0] - deltaulr[1])/self.base
        self.theta = self.theta+self.dtheta
        # NOTE(review): only wraps theta above 2*pi; negative theta is
        # never wrapped back into [0, 2*pi).
        while self.theta>math.pi*2.0:
            self.theta-=math.pi*2.0
        x = self.x
        y = self.y
        self.x = self.x + deltau*math.cos(self.theta)
        self.y = self.y + deltau*math.sin(self.theta)
        if (cd.circle_to_aabb_inverse(self, polygon)[0] == True):
            # Collision with the walls: revert the whole step.
            self.x = ox
            self.y = oy
            self.theta = ot
            self.wheels_omega = owheels
        return self

    def draw(self, cr):
        """Draw the body circle, heading tick and (optionally) the beams."""
        w = width
        h = height
        cr.translate(self.x/pixel_per_mm+w/2., self.y/pixel_per_mm+h/2.)
        cr.rotate(self.theta)
        cr.set_source_rgb (self.color[0], self.color[1], self.color[2])
        cr.arc(0, 0, self.r/pixel_per_mm, 0.0, 2*math.pi)
        cr.move_to(0,0)
        cr.line_to(self.r/pixel_per_mm,0)
        cr.stroke()
        cr.set_source_rgb (1., 0, 0)
        # NOTE(review): `dist` and the loop below are dead code -- the loop
        # body only recomputes `angle`, which is never used.
        dist = [(0.0, 0.0, 0.0) for i in range(8)]
        for i, b in enumerate(self.beams):
            angle = self.theta+b*math.pi/180.
        if self.draw_beams:
            # Draw each beam as a translucent ray of the last measured length.
            for b, l in zip(self.beams, self.ranges):
                cr.rotate(b*math.pi/180.0)
                cr.set_source_rgba (0, 0, 0, 0.5)
                cr.move_to(self.r/pixel_per_mm+3, 0)
                cr.line_to(self.r/pixel_per_mm+l+3, 0)
                cr.stroke()
                cr.rotate(-b*math.pi/180.0)


class Overlay:
    """Optional extra drawing layer rendered on top of the scene."""

    def draw(self, context, width, height):
        pass


# Create a GTK+ widget on which we will draw using Cairo
class Screen(gtk.DrawingArea):
    """Main simulator widget: renders the scene and drives the simulation.

    NOTE(review): `step`, `overlay_list`, `radio`, `cam` and
    `active_object` are class attributes (shared across instances and
    created at class-definition time); the `radio`/`cam` services are
    instantiated as soon as this module is imported.
    """

    # Draw in response to an expose-event
    __gsignals__ = { "expose-event": "override" }
    step = 0
    overlay_list = []
    radio = radio.radio()
    cam = ceiling_camera.camera_service(0)
    radio.add_transciever(cam.transciever)
    active_object = None

    def expose_overlays(self, c, w, h):
        for o in self.overlay_list:
            o.draw(c, w, h)

    def periodic(self):
        # gobject timeout callback: request a redraw and keep the timer alive.
        self.queue_draw()
        return True

    def radio_init(self):
        """Register every robot's transciever and camera tracking entry."""
        layer = objects2d[1]
        for r in layer:
            self.radio.add_transciever(r.transciever)
            self.cam.track_object(r.name, r)

    def button_press_event(self, widget, event):
        """Left click selects the object under the cursor; right click
        forwards the event to the currently selected object."""
        if event.button == 1:
            layer0 = objects2d[0]
            layer1 = objects2d[1]
            for obj in layer0+layer1:
                # Convert window pixels to arena-centred coordinates.
                if obj.check_if_point_belongs(event.x - width/2., event.y-height/2.):
                    if self.active_object != None:
                        self.active_object.selected = False
                    self.active_object = obj
                    self.active_object.selected = True
                    obj.click_handler(event)
                    return
        elif event.button == 3:
            if self.active_object!=None:
                self.active_object.click_handler(event)
            else:
                print "Select object"

    # Handle the expose-event by drawing
    def do_expose_event(self, event):
        """Render one frame and advance the simulation one tick.

        Order per frame: pump radio/camera services, draw arena and
        obstacles into an offscreen surface, sample every robot's range
        finders, integrate robot odometry and draw the robots, draw
        overlays, optionally dump a PNG, then blit to the window.
        """
        self.radio.process()
        self.cam.process()
        # Create the cairo context
        cr_gdk = self.window.cairo_create()
        surface = cr_gdk.get_target()
        # Render into an offscreen surface so the pixel data can be read back.
        cr_surf = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
        cr = cairo.Context(cr_surf)
        # Restrict Cairo to the exposed area; avoid extra work
        cr.rectangle(event.area.x, event.area.y, event.area.width, event.area.height)
        cr.clip()
        # polygon and obstacles
        layer = objects2d[0]
        for obj in layer:
            obj.draw(cr)
        cr_surf.flush()
        data = cr_surf.get_data()
        layer = objects2d[1]
        for r in layer:
            r.sample_rangefinders(data)
        for obj in layer:
            # objects2d[0][0] is the arena polygon used for wall collisions.
            obj.update_position(objects2d[0][0])
            obj.draw(cr)
            cr.identity_matrix()
        self.expose_overlays(cr, width, height)
        if write_pngs:
            cr_surf.write_to_png("/tmp/"+str(self.step).zfill(6)+".png")
        self.step += 1
        cr_gdk.set_source_surface(cr_surf)
        cr_gdk.paint()


# GTK mumbo-jumbo to show the widget in a window and quit when it's closed
def run(Widget):
    """Create the main window, hook up events and enter the GTK main loop."""
    window = gtk.Window()
    window.resize(width, height)
    window.connect("delete-event", gtk.main_quit)
    widget = Widget()
    widget.radio_init()
    widget.connect("button_press_event", widget.button_press_event)
    widget.set_events(gtk.gdk.BUTTON_PRESS_MASK)
    # Redraw (and step the simulation) every 10 ms.
    gobject.timeout_add(10, widget.periodic)
    widget.x = 0.
    widget.y = 0.
    widget.show()
    window.add(widget)
    window.present()
    gtk.main()

if __name__ == "__main__":
    run(Screen)
gpl-3.0
trdean/grEME
gr-channels/python/channels/phase_bal.py
60
3652
#!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: IQ Phase Balancer
# Author: matt@ettus.com
# Generated: Thu Aug 1 11:49:41 2013
##################################################

from gnuradio import blocks
from gnuradio import filter
from gnuradio import gr
from gnuradio.filter import firdes


class phase_bal(gr.hier_block2):
    """IQ phase-balancing hierarchical block (GRC-generated).

    One complex stream in, one complex stream out.  An imbalance estimate
    is formed from the I/Q product normalised by the input magnitude
    squared, smoothed by a single-pole IIR filter with coefficient
    `alpha`, and used to correct both the I and Q paths before they are
    recombined into the output.  With alpha=0 (the default) the IIR
    filter output stays at zero, i.e. no correction is applied --
    presumably alpha is set at runtime via set_alpha(); TODO confirm.
    """

    def __init__(self, alpha=0):
        gr.hier_block2.__init__(
            self, "IQ Phase Balancer",
            gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
            gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
        )

        ##################################################
        # Parameters
        ##################################################
        # IIR averaging coefficient for the imbalance estimate.
        self.alpha = alpha

        ##################################################
        # Blocks
        ##################################################
        self.filter_single_pole_iir_filter_xx_0 = filter.single_pole_iir_filter_ff(alpha, 1)
        self.blocks_sub_xx_1 = blocks.sub_ff(1)
        self.blocks_sub_xx_0 = blocks.sub_ff(1)
        self.blocks_multiply_xx_2 = blocks.multiply_vff(1)
        self.blocks_multiply_xx_1 = blocks.multiply_vff(1)
        self.blocks_multiply_xx_0 = blocks.multiply_vff(1)
        self.blocks_multiply_const_vxx_0 = blocks.multiply_const_vff((2, ))
        self.blocks_float_to_complex_0 = blocks.float_to_complex(1)
        self.blocks_divide_xx_0 = blocks.divide_ff(1)
        self.blocks_complex_to_mag_squared_0 = blocks.complex_to_mag_squared(1)
        self.blocks_complex_to_float_0 = blocks.complex_to_float(1)

        ##################################################
        # Connections
        ##################################################
        # Estimate path: I*Q / |z|^2, scaled by 2 and smoothed by the IIR filter.
        self.connect((self.blocks_complex_to_float_0, 0), (self.blocks_multiply_xx_0, 0))
        self.connect((self.blocks_complex_to_float_0, 1), (self.blocks_multiply_xx_0, 1))
        self.connect((self.blocks_multiply_xx_0, 0), (self.blocks_divide_xx_0, 0))
        # Correction path: subtract the (estimate-weighted) opposite rail
        # from each of I and Q before recombining into a complex stream.
        self.connect((self.blocks_sub_xx_0, 0), (self.blocks_float_to_complex_0, 1))
        self.connect((self.blocks_multiply_xx_1, 0), (self.blocks_sub_xx_0, 1))
        self.connect((self.filter_single_pole_iir_filter_xx_0, 0), (self.blocks_multiply_xx_1, 1))
        self.connect((self.blocks_complex_to_float_0, 0), (self.blocks_multiply_xx_1, 0))
        self.connect((self.blocks_multiply_xx_2, 0), (self.blocks_sub_xx_1, 1))
        self.connect((self.blocks_complex_to_float_0, 1), (self.blocks_sub_xx_0, 0))
        self.connect((self.blocks_sub_xx_1, 0), (self.blocks_float_to_complex_0, 0))
        self.connect((self.blocks_complex_to_mag_squared_0, 0), (self.blocks_divide_xx_0, 1))
        self.connect((self.blocks_complex_to_float_0, 0), (self.blocks_sub_xx_1, 0))
        self.connect((self.blocks_divide_xx_0, 0), (self.blocks_multiply_const_vxx_0, 0))
        self.connect((self.blocks_multiply_const_vxx_0, 0), (self.filter_single_pole_iir_filter_xx_0, 0))
        # Hierarchical-block input feeds both the splitter and |z|^2.
        self.connect((self, 0), (self.blocks_complex_to_float_0, 0))
        self.connect((self, 0), (self.blocks_complex_to_mag_squared_0, 0))
        self.connect((self.blocks_float_to_complex_0, 0), (self, 0))
        self.connect((self.filter_single_pole_iir_filter_xx_0, 0), (self.blocks_multiply_xx_2, 0))
        self.connect((self.blocks_complex_to_float_0, 1), (self.blocks_multiply_xx_2, 1))

    def get_alpha(self):
        """Return the current IIR averaging coefficient."""
        return self.alpha

    def set_alpha(self, alpha):
        """Update the IIR averaging coefficient at runtime."""
        self.alpha = alpha
        self.filter_single_pole_iir_filter_xx_0.set_taps(self.alpha)
gpl-3.0
marathoncoder/scrapy
scrapy_py/settings.py
1
3915
# -*- coding: utf-8 -*-

# Scrapy settings for the scrapy_py project.
#
# Only settings that deviate from Scrapy's defaults are active here.
# Full reference:
#   http://doc.scrapy.org/en/latest/topics/settings.html
#   http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#   http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

# ---- Project identity --------------------------------------------------

BOT_NAME = 'scrapy_py'

SPIDER_MODULES = ['scrapy_py.spiders']
NEWSPIDER_MODULE = 'scrapy_py.spiders'

# Project-local Scrapy commands.
COMMANDS_MODULE = 'scrapy_py.commands'

# ---- Politeness / anti-ban ---------------------------------------------

# Wait 250 ms between requests to the same site.
DOWNLOAD_DELAY = 0.25

# Disable cookies so the crawler is harder to fingerprint and ban.
COOKIES_ENABLED = False

# Honour robots.txt rules.
ROBOTSTXT_OBEY = True

# ---- Robustness / speed ------------------------------------------------

# Do not retry failed requests.
RETRY_ENABLED = False

# Use a short download timeout (seconds).
DOWNLOAD_TIMEOUT = 15

# Do not follow HTTP redirects.
REDIRECT_ENABLED = False

# Twisted reactor thread-pool size (Scrapy default is 10).
REACTOR_THREADPOOL_MAXSIZE = 15

# ---- MongoDB export ----------------------------------------------------

MONGODB_SERVER = 'localhost'
MONGODB_PORT = 27017
MONGODB_DB = 'scrapy'
MONGODB_COLLECTION = 'scrapys'

# ---- Item pipelines ----------------------------------------------------

# To enable an Item Pipeline component, add its class to ITEM_PIPELINES.
# Items flow through pipelines in ascending order of the assigned value
# (conventionally within the 0-1000 range).
ITEM_PIPELINES = {
    'scrapy_py.pipelines.JsonPipeline': 300,
    'scrapy_py.pipelines.MongoPipeline': 500
}

# ---- Settings left at their Scrapy defaults (kept for reference) -------

# Identify the crawler (and website) in the User-Agent header:
#USER_AGENT = 'scrapy_py (+http://www.yourdomain.com)'

# Concurrency limits:
#CONCURRENT_REQUESTS = 32
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Telnet console:
#TELNETCONSOLE_ENABLED = False

# Default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Spider / downloader middlewares and extensions:
#SPIDER_MIDDLEWARES = {
#    'scrapy_py.middlewares.MyCustomSpiderMiddleware': 543,
#}
#DOWNLOADER_MIDDLEWARES = {
#    'scrapy_py.middlewares.MyCustomDownloaderMiddleware': 543,
#}
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# AutoThrottle (see http://doc.scrapy.org/en/latest/topics/autothrottle.html):
#AUTOTHROTTLE_ENABLED = True
#AUTOTHROTTLE_START_DELAY = 5
#AUTOTHROTTLE_MAX_DELAY = 60
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
#AUTOTHROTTLE_DEBUG = False

# HTTP caching:
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
mit
RadonX-ROM/external_skia
bench/bench_util.py
145
13462
''' Created on May 19, 2011 @author: bungeman ''' import os import re import math # bench representation algorithm constant names ALGORITHM_AVERAGE = 'avg' ALGORITHM_MEDIAN = 'med' ALGORITHM_MINIMUM = 'min' ALGORITHM_25TH_PERCENTILE = '25th' # Regular expressions used throughout. PER_SETTING_RE = '([^\s=]+)(?:=(\S+))?' SETTINGS_RE = 'skia bench:((?:\s+' + PER_SETTING_RE + ')*)' BENCH_RE = 'running bench (?:\[\d+ \d+\] )?\s*(\S+)' TIME_RE = '(?:(\w*)msecs = )?\s*((?:\d+\.\d+)(?:,\s*\d+\.\d+)*)' # non-per-tile benches have configs that don't end with ']' or '>' CONFIG_RE = '(\S+[^\]>]):\s+((?:' + TIME_RE + '\s+)+)' # per-tile bench lines are in the following format. Note that there are # non-averaged bench numbers in separate lines, which we ignore now due to # their inaccuracy. TILE_RE = (' tile_(\S+): tile \[\d+,\d+\] out of \[\d+,\d+\] <averaged>:' ' ((?:' + TIME_RE + '\s+)+)') # for extracting tile layout TILE_LAYOUT_RE = ' out of \[(\d+),(\d+)\] <averaged>: ' PER_SETTING_RE_COMPILED = re.compile(PER_SETTING_RE) SETTINGS_RE_COMPILED = re.compile(SETTINGS_RE) BENCH_RE_COMPILED = re.compile(BENCH_RE) TIME_RE_COMPILED = re.compile(TIME_RE) CONFIG_RE_COMPILED = re.compile(CONFIG_RE) TILE_RE_COMPILED = re.compile(TILE_RE) TILE_LAYOUT_RE_COMPILED = re.compile(TILE_LAYOUT_RE) class BenchDataPoint: """A single data point produced by bench. 
""" def __init__(self, bench, config, time_type, time, settings, tile_layout='', per_tile_values=[], per_iter_time=[]): # string name of the benchmark to measure self.bench = bench # string name of the configurations to run self.config = config # type of the timer in string: '' (walltime), 'c' (cpu) or 'g' (gpu) self.time_type = time_type # float number of the bench time value self.time = time # dictionary of the run settings self.settings = settings # how tiles cover the whole picture: '5x3' means 5 columns and 3 rows self.tile_layout = tile_layout # list of float for per_tile bench values, if applicable self.per_tile_values = per_tile_values # list of float for per-iteration bench time, if applicable self.per_iter_time = per_iter_time def __repr__(self): return "BenchDataPoint(%s, %s, %s, %s, %s)" % ( str(self.bench), str(self.config), str(self.time_type), str(self.time), str(self.settings), ) class _ExtremeType(object): """Instances of this class compare greater or less than other objects.""" def __init__(self, cmpr, rep): object.__init__(self) self._cmpr = cmpr self._rep = rep def __cmp__(self, other): if isinstance(other, self.__class__) and other._cmpr == self._cmpr: return 0 return self._cmpr def __repr__(self): return self._rep Max = _ExtremeType(1, "Max") Min = _ExtremeType(-1, "Min") class _ListAlgorithm(object): """Algorithm for selecting the representation value from a given list. representation is one of the ALGORITHM_XXX representation types.""" def __init__(self, data, representation=None): if not representation: representation = ALGORITHM_AVERAGE # default algorithm self._data = data self._len = len(data) if representation == ALGORITHM_AVERAGE: self._rep = sum(self._data) / self._len else: self._data.sort() if representation == ALGORITHM_MINIMUM: self._rep = self._data[0] else: # for percentiles, we use the value below which x% of values are # found, which allows for better detection of quantum behaviors. 
if representation == ALGORITHM_MEDIAN: x = int(round(0.5 * self._len + 0.5)) elif representation == ALGORITHM_25TH_PERCENTILE: x = int(round(0.25 * self._len + 0.5)) else: raise Exception("invalid representation algorithm %s!" % representation) self._rep = self._data[x - 1] def compute(self): return self._rep def _ParseAndStoreTimes(config_re_compiled, is_per_tile, line, bench, value_dic, layout_dic): """Parses given bench time line with regex and adds data to value_dic. config_re_compiled: precompiled regular expression for parsing the config line. is_per_tile: boolean indicating whether this is a per-tile bench. If so, we add tile layout into layout_dic as well. line: input string line to parse. bench: name of bench for the time values. value_dic: dictionary to store bench values. See bench_dic in parse() below. layout_dic: dictionary to store tile layouts. See parse() for descriptions. """ for config in config_re_compiled.finditer(line): current_config = config.group(1) tile_layout = '' if is_per_tile: # per-tile bench, add name prefix current_config = 'tile_' + current_config layouts = TILE_LAYOUT_RE_COMPILED.search(line) if layouts and len(layouts.groups()) == 2: tile_layout = '%sx%s' % layouts.groups() times = config.group(2) for new_time in TIME_RE_COMPILED.finditer(times): current_time_type = new_time.group(1) iters = [float(i) for i in new_time.group(2).strip().split(',')] value_dic.setdefault(bench, {}).setdefault( current_config, {}).setdefault(current_time_type, []).append( iters) layout_dic.setdefault(bench, {}).setdefault( current_config, {}).setdefault(current_time_type, tile_layout) def parse_skp_bench_data(directory, revision, rep, default_settings=None): """Parses all the skp bench data in the given directory. Args: directory: string of path to input data directory. revision: git hash revision that matches the data to process. rep: bench representation algorithm, see bench_util.py. default_settings: dictionary of other run settings. 
See writer.option() in bench/benchmain.cpp. Returns: A list of BenchDataPoint objects. """ revision_data_points = [] file_list = os.listdir(directory) file_list.sort() for bench_file in file_list: scalar_type = None # Scalar type, if any, is in the bench filename after 'scalar_'. if (bench_file.startswith('bench_' + revision + '_data_')): if bench_file.find('scalar_') > 0: components = bench_file.split('_') scalar_type = components[components.index('scalar') + 1] else: # Skips non skp bench files. continue with open('/'.join([directory, bench_file]), 'r') as file_handle: settings = dict(default_settings or {}) settings['scalar'] = scalar_type revision_data_points.extend(parse(settings, file_handle, rep)) return revision_data_points # TODO(bensong): switch to reading JSON output when available. This way we don't # need the RE complexities. def parse(settings, lines, representation=None): """Parses bench output into a useful data structure. ({str:str}, __iter__ -> str) -> [BenchDataPoint] representation is one of the ALGORITHM_XXX types.""" benches = [] current_bench = None # [bench][config][time_type] -> [[per-iter values]] where per-tile config # has per-iter value list for each tile [[<tile1_iter1>,<tile1_iter2>,...], # [<tile2_iter1>,<tile2_iter2>,...],...], while non-per-tile config only # contains one list of iterations [[iter1, iter2, ...]]. 
bench_dic = {} # [bench][config][time_type] -> tile_layout layout_dic = {} for line in lines: # see if this line is a settings line settingsMatch = SETTINGS_RE_COMPILED.search(line) if (settingsMatch): settings = dict(settings) for settingMatch in PER_SETTING_RE_COMPILED.finditer(settingsMatch.group(1)): if (settingMatch.group(2)): settings[settingMatch.group(1)] = settingMatch.group(2) else: settings[settingMatch.group(1)] = True # see if this line starts a new bench new_bench = BENCH_RE_COMPILED.search(line) if new_bench: current_bench = new_bench.group(1) # add configs on this line to the bench_dic if current_bench: if line.startswith(' tile_') : _ParseAndStoreTimes(TILE_RE_COMPILED, True, line, current_bench, bench_dic, layout_dic) else: _ParseAndStoreTimes(CONFIG_RE_COMPILED, False, line, current_bench, bench_dic, layout_dic) # append benches to list for bench in bench_dic: for config in bench_dic[bench]: for time_type in bench_dic[bench][config]: tile_layout = '' per_tile_values = [] # empty for non-per-tile configs per_iter_time = [] # empty for per-tile configs bench_summary = None # a single final bench value if len(bench_dic[bench][config][time_type]) > 1: # per-tile config; compute representation for each tile per_tile_values = [ _ListAlgorithm(iters, representation).compute() for iters in bench_dic[bench][config][time_type]] # use sum of each tile representation for total bench value bench_summary = sum(per_tile_values) # extract tile layout tile_layout = layout_dic[bench][config][time_type] else: # get the list of per-iteration values per_iter_time = bench_dic[bench][config][time_type][0] bench_summary = _ListAlgorithm( per_iter_time, representation).compute() benches.append(BenchDataPoint( bench, config, time_type, bench_summary, settings, tile_layout, per_tile_values, per_iter_time)) return benches class LinearRegression: """Linear regression data based on a set of data points. 
([(Number,Number)]) There must be at least two points for this to make sense.""" def __init__(self, points): n = len(points) max_x = Min min_x = Max Sx = 0.0 Sy = 0.0 Sxx = 0.0 Sxy = 0.0 Syy = 0.0 for point in points: x = point[0] y = point[1] max_x = max(max_x, x) min_x = min(min_x, x) Sx += x Sy += y Sxx += x*x Sxy += x*y Syy += y*y denom = n*Sxx - Sx*Sx if (denom != 0.0): B = (n*Sxy - Sx*Sy) / denom else: B = 0.0 a = (1.0/n)*(Sy - B*Sx) se2 = 0 sB2 = 0 sa2 = 0 if (n >= 3 and denom != 0.0): se2 = (1.0/(n*(n-2)) * (n*Syy - Sy*Sy - B*B*denom)) sB2 = (n*se2) / denom sa2 = sB2 * (1.0/n) * Sxx self.slope = B self.intercept = a self.serror = math.sqrt(max(0, se2)) self.serror_slope = math.sqrt(max(0, sB2)) self.serror_intercept = math.sqrt(max(0, sa2)) self.max_x = max_x self.min_x = min_x def __repr__(self): return "LinearRegression(%s, %s, %s, %s, %s)" % ( str(self.slope), str(self.intercept), str(self.serror), str(self.serror_slope), str(self.serror_intercept), ) def find_min_slope(self): """Finds the minimal slope given one standard deviation.""" slope = self.slope intercept = self.intercept error = self.serror regr_start = self.min_x regr_end = self.max_x regr_width = regr_end - regr_start if slope < 0: lower_left_y = slope*regr_start + intercept - error upper_right_y = slope*regr_end + intercept + error return min(0, (upper_right_y - lower_left_y) / regr_width) elif slope > 0: upper_left_y = slope*regr_start + intercept + error lower_right_y = slope*regr_end + intercept - error return max(0, (lower_right_y - upper_left_y) / regr_width) return 0 def CreateRevisionLink(revision_number): """Returns HTML displaying the given revision number and linking to that revision's change page at code.google.com, e.g. 
http://code.google.com/p/skia/source/detail?r=2056 """ return '<a href="http://code.google.com/p/skia/source/detail?r=%s">%s</a>'%( revision_number, revision_number) def main(): foo = [[0.0, 0.0], [0.0, 1.0], [0.0, 2.0], [0.0, 3.0]] LinearRegression(foo) if __name__ == "__main__": main()
bsd-3-clause
npuichigo/ttsflow
third_party/tensorflow/tensorflow/contrib/cudnn_rnn/python/kernel_tests/cudnn_rnn_ops_test.py
8
25732
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Cudnn RNN models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import os import unittest import numpy as np from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops from tensorflow.core.protobuf import saver_pb2 from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework.test_util import TensorFlowTestCase from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import rnn as rnn_lib from tensorflow.python.ops import rnn_cell_impl from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.ops.losses import losses from tensorflow.python.platform import googletest from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent from tensorflow.python.training import saver as saver_lib def _create_cudnn_compatible_canonical_rnn(cudnn_model, inputs, use_block_cell, scope="rnn"): model = 
cudnn_model.rnn_mode if model not in (cudnn_rnn_ops.CUDNN_LSTM, cudnn_rnn_ops.CUDNN_GRU): raise ValueError("%s is not supported!" % model) if model == cudnn_rnn_ops.CUDNN_GRU and use_block_cell: raise ValueError("gru is not supported when using block cell!") num_units = cudnn_model.num_units num_layers = cudnn_model.num_layers # To reuse cuDNN-trained models, must use cudnn compatible rnn cells. if use_block_cell: single_cell = lambda: cudnn_rnn_ops.CudnnCompatibleLSTMBlockCell(num_units) else: if model == cudnn_rnn_ops.CUDNN_LSTM: single_cell = lambda: cudnn_rnn_ops.CudnnCompatibleLSTMCell(num_units) else: single_cell = lambda: cudnn_rnn_ops.CudnnCompatibleGRUCell(num_units) cell = rnn_cell_impl.MultiRNNCell([single_cell() for _ in range(num_layers)]) return rnn_lib.dynamic_rnn( cell, inputs, dtype=dtypes.float32, time_major=True, scope=scope) class CudnnRNNTest(TensorFlowTestCase): def _CreateModel(self, rnn_mode, num_layers, num_units, input_size, input_mode="linear_input", dropout=0.): if rnn_mode == cudnn_rnn_ops.CUDNN_LSTM: model = cudnn_rnn_ops.CudnnLSTM( num_layers, num_units, input_size, dropout=dropout) elif rnn_mode == cudnn_rnn_ops.CUDNN_GRU: model = cudnn_rnn_ops.CudnnGRU( num_layers, num_units, input_size, dropout=dropout) elif rnn_mode == cudnn_rnn_ops.CUDNN_RNN_TANH: model = cudnn_rnn_ops.CudnnRNNTanh( num_layers, num_units, input_size, dropout=dropout) elif rnn_mode == cudnn_rnn_ops.CUDNN_RNN_RELU: model = cudnn_rnn_ops.CudnnRNNRelu( num_layers, num_units, input_size, dropout=dropout) else: raise ValueError("Invalid rnn_mode: %s" % rnn_mode) return model def _create_params_savable(self, params, model): """Create a RNNParamsSaveable for the weight and bias parameters. Args: params: a Variable for weight and bias parameters. model: a CudnnRNN model. 
""" params_saveable = cudnn_rnn_ops.RNNParamsSaveable( model, model.params_to_canonical, model.canonical_to_params, [params], "rnn") ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, params_saveable) def _testSaveRestoreVariable(self, rnn_mode): model = self._CreateModel(rnn_mode, num_layers=2, num_units=7, input_size=3) random_seed.set_random_seed(1234) params_size_t = model.params_size() params = variables.Variable( random_ops.random_uniform([params_size_t]), validate_shape=False) self._create_params_savable(params, model) save_path = os.path.join(self.get_temp_dir(), "save-restore-variable-test") saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2) with self.test_session(use_gpu=True) as sess: sess.run(variables.global_variables_initializer()) params_v = sess.run(params) val = saver.save(sess, save_path) self.assertEqual(save_path, val) with self.test_session(use_gpu=True) as sess: reset_params = state_ops.assign(params, array_ops.zeros([params_size_t])) sess.run(reset_params) saver.restore(sess, save_path) params_v_restored = sess.run(params) self.assertAllEqual(params_v, params_v_restored) def _build_forward_cudnn_model(self, rnn_mode, num_layers, num_units, input_data, is_training=False): input_data_shape = input_data.get_shape().with_rank(3) batch_size = input_data_shape[1].value input_size = input_data_shape[2].value model = self._CreateModel(rnn_mode, num_layers, num_units, input_size) # Set zero init input states input_h = constant_op.constant( np.zeros([num_layers, batch_size, num_units]), dtype=dtypes.float32) has_input_c = (rnn_mode == cudnn_rnn_ops.CUDNN_LSTM) if has_input_c: input_c = constant_op.constant( np.zeros([num_layers, batch_size, num_units]), dtype=dtypes.float32) # Set rnn params params_size_t = model.params_size() params = variables.Variable( random_ops.random_uniform([params_size_t]), validate_shape=False) args = { "input_data": input_data, "input_h": input_h, "params": params, "is_training": is_training } if has_input_c: 
args["input_c"] = input_c # Build cell output_tuple = model(**args) # Create savable objects for params self._create_params_savable(params, model) return output_tuple, model, params @unittest.skipUnless(test.is_built_with_cuda(), "Test only applicable when running on GPUs") def testCudnnCompatibleRnnCells(self): configs = [ { "num_layers": 1, "seq_length": 3, "num_units": 4, "input_size": 5, "batch_size": 6, }, { "num_layers": 2, "seq_length": 8, "num_units": 4, "input_size": 8, "batch_size": 16, }, { "num_layers": 2, "seq_length": 3, "num_units": 4, "input_size": 5, "batch_size": 6, }, { "num_layers": 1, "seq_length": 2, "num_units": 2, "input_size": 4, "batch_size": 1, }, ] for rnn, cfg, use_block_cell in itertools.product( (cudnn_rnn_ops.CUDNN_LSTM,), configs, (True, False,)): self._testCudnnCompatibleRnnCells(cfg["num_layers"], cfg["seq_length"], cfg["num_units"], cfg["input_size"], cfg["batch_size"], rnn, use_block_cell) # TODO(jamesqin): Add CudnnCompatibleGRUBlockCell. for rnn, cfg, use_block_cell in itertools.product( (cudnn_rnn_ops.CUDNN_GRU,), configs, (False,)): self._testCudnnCompatibleRnnCells(cfg["num_layers"], cfg["seq_length"], cfg["num_units"], cfg["input_size"], cfg["batch_size"], rnn, use_block_cell) def _testCudnnCompatibleRnnCells(self, num_layers, seq_length, num_units, input_size, batch_size, rnn_mode, use_block_cell): has_state_c = rnn_mode == cudnn_rnn_ops.CUDNN_LSTM np.random.seed(0) # Train graph with ops.Graph().as_default(): random_seed.set_random_seed(299) input_data = array_ops.placeholder( dtypes.float32, shape=[seq_length, batch_size, input_size]) output_tuple, cudnn_model, cudnn_params = self._build_forward_cudnn_model( rnn_mode, num_layers, num_units, input_data, is_training=True) target_output = array_ops.placeholder(dtype=dtypes.float32, shape=None) total_sum = sum(map(math_ops.reduce_sum, output_tuple)) loss_op = losses.log_loss(labels=target_output, predictions=total_sum) optimizer = 
gradient_descent.GradientDescentOptimizer(learning_rate=1e-2) train_op = optimizer.minimize(loss_op) saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2) # Train Cudnn model with self.test_session( use_gpu=True, graph=ops.get_default_graph()) as sess: sess.run(variables.global_variables_initializer()) # Train 128 steps num_steps = 128 for _ in range(num_steps): inputs = np.random.rand(seq_length, batch_size, input_size).astype(np.float32) targets = np.random.rand() sess.run( train_op, feed_dict={input_data: inputs, target_output: targets}) save_path = os.path.join(self.get_temp_dir(), ("cudnn-rnn-%s-test" % rnn_mode)) save_v = saver.save(sess, save_path) self.assertEqual(save_path, save_v) cudnn_params_v = sess.run(cudnn_params) # cuDNN inference graph with ops.Graph().as_default(): random_seed.set_random_seed(299) cudnn_inputs = array_ops.placeholder( dtypes.float32, shape=[seq_length, batch_size, input_size]) (cudnn_output_tuple, cudnn_model, cudnn_params) = self._build_forward_cudnn_model( rnn_mode, num_layers, num_units, cudnn_inputs, is_training=False) saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2) inference_input = np.random.rand(seq_length, batch_size, input_size).astype(np.float32) with self.test_session( use_gpu=True, graph=ops.get_default_graph()) as sess: sess.run(variables.global_variables_initializer()) saver.restore(sess, save_path) restored_cudnn_params_v = sess.run(cudnn_params) self.assertAllEqual(cudnn_params_v, restored_cudnn_params_v) # Cudnn inference cudnn_output = sess.run( cudnn_output_tuple, feed_dict={cudnn_inputs: inference_input}) # Canonical RNN inference graph with ops.Graph().as_default(): random_seed.set_random_seed(299) cell_inputs = array_ops.placeholder( dtypes.float32, shape=[seq_length, batch_size, input_size]) (output, states) = _create_cudnn_compatible_canonical_rnn( cudnn_model, cell_inputs, use_block_cell) saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2) with self.test_session( use_gpu=True, 
graph=ops.get_default_graph()) as sess: saver.restore(sess, save_path) # BlockCell inference output_v, states_v = sess.run( [output, states], feed_dict={cell_inputs: inference_input}) # output across timestamps are packed into one tensor. self.assertAllClose(cudnn_output[0], output_v, atol=1e-6, rtol=1e-6) for i in range(num_layers): if has_state_c: # output_h self.assertAllClose( cudnn_output[1][i, :], states_v[i].h, atol=1e-6, rtol=1e-6) # output_c self.assertAllClose( cudnn_output[2][i, :], states_v[i].c, atol=1e-6, rtol=1e-6) else: self.assertAllClose( cudnn_output[1][i, :], states_v[i], atol=1e-6, rtol=1e-6) def _testSaveRestoreOutput(self, rnn_mode): num_layers = 2 num_units = 7 input_size = 7 seq_length = 10 batch_size = 5 dir_count = 1 model = self._CreateModel(rnn_mode, num_layers, num_units, input_size) params_size_t = model.params_size() params = variables.Variable( array_ops.ones([params_size_t]), validate_shape=False) self._create_params_savable(params, model) save_path = os.path.join(self.get_temp_dir(), "save-restore-output-test") saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2) has_input_c = (rnn_mode == cudnn_rnn_ops.CUDNN_LSTM) input_data = array_ops.ones([seq_length, batch_size, input_size]) input_h = array_ops.ones([num_layers * dir_count, batch_size, num_units]) if has_input_c: input_c = array_ops.ones([num_layers * dir_count, batch_size, num_units]) outputs = model( input_data=input_data, input_h=input_h, input_c=input_c, params=params, is_training=False) else: outputs = model( input_data=input_data, input_h=input_h, params=params, is_training=False) total_sum = sum(map(math_ops.reduce_sum, outputs)) with self.test_session(use_gpu=True) as sess: sess.run(variables.global_variables_initializer()) total_sum_v = sess.run(total_sum) val = saver.save(sess, save_path) self.assertEqual(save_path, val) with self.test_session(use_gpu=True) as sess: reset_params = state_ops.assign(params, array_ops.zeros([params_size_t])) 
sess.run(reset_params) saver.restore(sess, save_path) total_sum_v_restored = sess.run(total_sum) self.assertAllEqual(total_sum_v, total_sum_v_restored) @unittest.skipUnless(test.is_built_with_cuda(), "Test only applicable when running on GPUs") def testSaveRestore(self): rnn_modes = [ cudnn_rnn_ops.CUDNN_LSTM, cudnn_rnn_ops.CUDNN_GRU, cudnn_rnn_ops.CUDNN_RNN_TANH, cudnn_rnn_ops.CUDNN_RNN_RELU ] for rnn_mode in rnn_modes: self._testSaveRestoreVariable(rnn_mode) self._testSaveRestoreOutput(rnn_mode) def _MinLSTMParamSize(self, num_layers, num_units, input_size, input_mode="auto_select", direction=cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION): if direction != cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION: # TODO(zhengxq): support bidirection in parameter size estimate. raise ValueError("Only unidirection in parameter size estimate") first_layer_weights = 4 * num_units * (num_units + input_size) higher_layer_weights = 8 * (num_layers - 1) * num_units * num_units all_biases = 8 * num_layers * num_units return first_layer_weights + higher_layer_weights + all_biases def _testOneLSTMParamsSize(self, num_layers, num_units, input_size): min_params_size = self._MinLSTMParamSize(num_layers, num_units, input_size) model = self._CreateModel(cudnn_rnn_ops.CUDNN_LSTM, num_layers, num_units, input_size) params_size = model.params_size() with self.test_session(use_gpu=True) as sess: params_size_v = sess.run(params_size) self.assertLessEqual(min_params_size, params_size_v) @unittest.skipUnless(test.is_built_with_cuda(), "Test only applicable when running on GPUs") def testLSTMParamsSize(self): test_configs = [ [4, 200, 200], [4, 200, 300], [4, 200, 100], [1, 100, 200], [2, 200, 100], [3, 200, 400], ] with ops.Graph().as_default(): for (num_layers, num_units, input_size) in test_configs: self._testOneLSTMParamsSize(num_layers, num_units, input_size) def _testOneSimpleInference(self, rnn_mode, num_layers, num_units, input_size, batch_size, seq_length, dir_count, dropout, expected, tolerance): 
random_seed.set_random_seed(5678) model = self._CreateModel( rnn_mode, num_layers, num_units, input_size, input_mode="auto_select", dropout=dropout) has_input_c = (rnn_mode == cudnn_rnn_ops.CUDNN_LSTM) params_size_t = model.params_size() input_data = array_ops.ones([seq_length, batch_size, input_size]) input_h = array_ops.ones([num_layers * dir_count, batch_size, num_units]) params = variables.Variable( array_ops.ones([params_size_t]), validate_shape=False) if has_input_c: input_c = array_ops.ones([num_layers * dir_count, batch_size, num_units]) output, output_h, output_c = model( input_data=input_data, input_h=input_h, input_c=input_c, params=params, is_training=False) else: output, output_h = model( input_data=input_data, input_h=input_h, params=params, is_training=False) output_sum = math_ops.reduce_sum(output) output_h_sum = math_ops.reduce_sum(output_h) total_sum = output_sum + output_h_sum if has_input_c: output_c_sum = math_ops.reduce_sum(output_c) total_sum += output_c_sum with self.test_session(use_gpu=True, graph=ops.get_default_graph()) as sess: sess.run(variables.global_variables_initializer()) total_sum_v = sess.run([total_sum]) self.assertAllClose( total_sum_v[0], expected, atol=tolerance, rtol=tolerance) @unittest.skipUnless(test.is_built_with_cuda(), "Test only applicable when running on GPUs") def testSimpleInference(self): # Cudnn scales result for dropout during training, therefore dropout has no # impact for inference results. # (lstm, gru, rnn_tanh are saturated in the test. rnn_relu case is most # demonstrative of the dropout-invariant nature of CudnnRnn.) 
test_configs = [ { "rnn_mode": cudnn_rnn_ops.CUDNN_LSTM, "dropout": [0., 0.5, 1.], "expected": 231833.22, "tolerance": 1e-2, "shape": { "num_layers": 4, "num_units": 200, "input_size": 200, "batch_size": 20, "seq_length": 10, "dir_count": 1, }, }, { "rnn_mode": cudnn_rnn_ops.CUDNN_GRU, "dropout": [0., 0.5, 1.], "expected": 56000, "tolerance": 1e-2, "shape": { "num_layers": 4, "num_units": 200, "input_size": 200, "batch_size": 20, "seq_length": 10, "dir_count": 1, }, }, { "rnn_mode": cudnn_rnn_ops.CUDNN_RNN_TANH, "dropout": [0., 0.5, 1.], "expected": 56000, "tolerance": 1e-2, "shape": { "num_layers": 4, "num_units": 200, "input_size": 200, "batch_size": 20, "seq_length": 10, "dir_count": 1, }, }, { "rnn_mode": cudnn_rnn_ops.CUDNN_RNN_RELU, "dropout": [0., 0.5, 1.], "expected": 130688, "tolerance": 1e-2, "shape": { "num_layers": 2, "num_units": 8, "input_size": 4, "batch_size": 4, "seq_length": 2, "dir_count": 1, }, }, ] with ops.Graph().as_default(): for config in test_configs: rnn_mode = config["rnn_mode"] dropout_list = config.get("dropout", [0.]) expected = config["expected"] tolerance = config["tolerance"] shape = config["shape"] for dropout in dropout_list: self._testOneSimpleInference( rnn_mode, shape["num_layers"], shape["num_units"], shape["input_size"], shape["batch_size"], shape["seq_length"], shape["dir_count"], dropout, expected, tolerance) def _testOneSimpleTraining(self, rnn_mode, num_layers, num_units, input_size, batch_size, seq_length, dir_count, dropout, tolerance): # Gradient checking runs two forward ops with almost the same input. Need to # make sure the drop patterns across the two runs are the same. 
old_env_state = os.environ.get("TF_CUDNN_RESET_RND_GEN_STATE", str(False)) os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = str(True) has_input_c = (rnn_mode == cudnn_rnn_ops.CUDNN_LSTM) random_seed.set_random_seed(1234) model = self._CreateModel( rnn_mode, num_layers, num_units, input_size, dropout=dropout) params_size_t = model.params_size() input_data = variables.Variable( random_ops.random_uniform([seq_length, batch_size, input_size])) input_h = variables.Variable( random_ops.random_uniform( [num_layers * dir_count, batch_size, num_units])) params = variables.Variable( random_ops.random_uniform([params_size_t]), validate_shape=False) if has_input_c: input_c = variables.Variable( random_ops.random_uniform( [num_layers * dir_count, batch_size, num_units])) output, output_h, output_c = model( input_data=input_data, input_h=input_h, input_c=input_c, params=params) else: output, output_h = model( input_data=input_data, input_h=input_h, params=params) output_sum = math_ops.reduce_sum(output) output_h_sum = math_ops.reduce_sum(output_h) total_sum = output_sum + output_h_sum if has_input_c: output_c_sum = math_ops.reduce_sum(output_c) total_sum += output_c_sum with self.test_session(use_gpu=True) as sess: params_size_v = sess.run(params_size_t) inputs_and_shapes = [ (input_data, [seq_length, batch_size, input_size]), (input_h, [num_layers * dir_count, batch_size, num_units]), (params, [params_size_v]), ] if has_input_c: inputs_and_shapes.append( (input_c, [num_layers * dir_count, batch_size, num_units]),) sess.run(variables.global_variables_initializer()) all_inputs = [entry[0] for entry in inputs_and_shapes] all_shapes = [entry[1] for entry in inputs_and_shapes] err = gradient_checker.compute_gradient_error(all_inputs, all_shapes, total_sum, [1]) self.assertLess(err, tolerance) os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = old_env_state @unittest.skipUnless(test.is_built_with_cuda(), "Test only applicable when running on GPUs") def testSimpleTraining(self): test_configs = 
[ { "rnn_mode": cudnn_rnn_ops.CUDNN_LSTM, "dropout": [0., 0.5, 1.], "tolerance": 1e-2, "shape": { "num_layers": 2, "num_units": 3, "input_size": 4, "batch_size": 3, "seq_length": 4, "dir_count": 1, }, }, { "rnn_mode": cudnn_rnn_ops.CUDNN_GRU, "dropout": [0., 0.5, 1.], "tolerance": 4e-3, "shape": { "num_layers": 2, "num_units": 3, "input_size": 4, "batch_size": 3, "seq_length": 4, "dir_count": 1, }, }, { "rnn_mode": cudnn_rnn_ops.CUDNN_RNN_TANH, "dropout": [0., 0.5, 1.], "tolerance": 5e-3, "shape": { "num_layers": 2, "num_units": 3, "input_size": 4, "batch_size": 3, "seq_length": 4, "dir_count": 1, }, }, { "rnn_mode": cudnn_rnn_ops.CUDNN_RNN_RELU, "dropout": [0., 0.5, 1.], "tolerance": 4e-1, "shape": { "num_layers": 2, "num_units": 3, "input_size": 4, "batch_size": 3, "seq_length": 4, "dir_count": 1, }, }, ] ops.reset_default_graph() with ops.Graph().as_default(): for config in test_configs: rnn_mode = config["rnn_mode"] dropout_list = config.get("dropout", [0.]) tolerance = config["tolerance"] shape = config["shape"] for dropout in dropout_list: self._testOneSimpleTraining(rnn_mode, shape["num_layers"], shape["num_units"], shape["input_size"], shape["batch_size"], shape["seq_length"], shape["dir_count"], dropout, tolerance) if __name__ == "__main__": googletest.main()
apache-2.0
eepalms/gem5-newcache
src/arch/x86/isa/insts/simd128/integer/logical/exclusive_or.py
91
2667
# Copyright (c) 2007 The Hewlett-Packard Development Company # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Gabe Black microcode = ''' def macroop PXOR_XMM_XMM { mxor xmml, xmml, xmmlm mxor xmmh, xmmh, xmmhm }; def macroop PXOR_XMM_M { lea t1, seg, sib, disp, dataSize=asz ldfp ufp1, seg, [1, t0, t1], dataSize=8 ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8 mxor xmml, xmml, ufp1 mxor xmmh, xmmh, ufp2 }; def macroop PXOR_XMM_P { rdip t7 lea t1, seg, riprel, disp, dataSize=asz ldfp ufp1, seg, [1, t0, t1], dataSize=8 ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8 mxor xmml, xmml, ufp1 mxor xmmh, xmmh, ufp2 }; '''
bsd-3-clause
dllsf/odootest
addons/project_issue/res_config.py
441
1492
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class project_issue_settings(osv.osv_memory): _name = 'project.config.settings' _inherit = ['project.config.settings', 'fetchmail.config.settings'] _columns = { 'fetchmail_issue': fields.boolean("Create issues from an incoming email account ", fetchmail_model='project.issue', fetchmail_name='Incoming Issues', help="""Allows you to configure your incoming mail server, and create issues from incoming emails."""), }
agpl-3.0
pelikanchik/edx-platform
common/djangoapps/student/tests/test_userstanding.py
18
3972
""" These are tests for disabling and enabling student accounts, and for making sure that students with disabled accounts are unable to access the courseware. """ from student.tests.factories import UserFactory, UserStandingFactory from student.models import UserStanding from django.test import TestCase, Client from django.core.urlresolvers import reverse, NoReverseMatch from nose.plugins.skip import SkipTest class UserStandingTest(TestCase): """test suite for user standing view for enabling and disabling accounts""" def setUp(self): # create users self.bad_user = UserFactory.create( username='bad_user', ) self.good_user = UserFactory.create( username='good_user', ) self.non_staff = UserFactory.create( username='non_staff', ) self.admin = UserFactory.create( username='admin', is_staff=True, ) # create clients self.bad_user_client = Client() self.good_user_client = Client() self.non_staff_client = Client() self.admin_client = Client() for user, client in [ (self.bad_user, self.bad_user_client), (self.good_user, self.good_user_client), (self.non_staff, self.non_staff_client), (self.admin, self.admin_client), ]: client.login(username=user.username, password='test') UserStandingFactory.create( user=self.bad_user, account_status=UserStanding.ACCOUNT_DISABLED, changed_by=self.admin ) # set different stock urls for lms and cms # to test disabled accounts' access to site try: self.some_url = reverse('dashboard') except NoReverseMatch: self.some_url = '/course' # since it's only possible to disable accounts from lms, we're going # to skip tests for cms def test_disable_account(self): self.assertEqual( UserStanding.objects.filter(user=self.good_user).count(), 0 ) try: response = self.admin_client.post(reverse('disable_account_ajax'), { 'username': self.good_user.username, 'account_action': 'disable', }) except NoReverseMatch: raise SkipTest() self.assertEqual( UserStanding.objects.get(user=self.good_user).account_status, UserStanding.ACCOUNT_DISABLED ) def 
test_disabled_account_403s(self): response = self.bad_user_client.get(self.some_url) self.assertEqual(response.status_code, 403) def test_reenable_account(self): try: response = self.admin_client.post(reverse('disable_account_ajax'), { 'username': self.bad_user.username, 'account_action': 'reenable' }) except NoReverseMatch: raise SkipTest() self.assertEqual( UserStanding.objects.get(user=self.bad_user).account_status, UserStanding.ACCOUNT_ENABLED ) def test_non_staff_cant_access_disable_view(self): try: response = self.non_staff_client.get(reverse('manage_user_standing'), { 'user': self.non_staff, }) except NoReverseMatch: raise SkipTest() self.assertEqual(response.status_code, 404) def test_non_staff_cant_disable_account(self): try: response = self.non_staff_client.post(reverse('disable_account_ajax'), { 'username': self.good_user.username, 'user': self.non_staff, 'account_action': 'disable' }) except NoReverseMatch: raise SkipTest() self.assertEqual(response.status_code, 404) self.assertEqual( UserStanding.objects.filter(user=self.good_user).count(), 0 )
agpl-3.0
takahiro33/proactive
.waf-1.7.9-786a25f4411038005f1f2ec0d121c503/waflib/Tools/compiler_c.py
343
1759
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file import os,sys,imp,types from waflib.Tools import ccroot from waflib import Utils,Configure from waflib.Logs import debug c_compiler={'win32':['msvc','gcc'],'cygwin':['gcc'],'darwin':['gcc'],'aix':['xlc','gcc'],'linux':['gcc','icc'],'sunos':['suncc','gcc'],'irix':['gcc','irixcc'],'hpux':['gcc'],'gnu':['gcc'],'java':['gcc','msvc','icc'],'default':['gcc'],} def configure(conf): try:test_for_compiler=conf.options.check_c_compiler except AttributeError:conf.fatal("Add options(opt): opt.load('compiler_c')") for compiler in test_for_compiler.split(): conf.env.stash() conf.start_msg('Checking for %r (c compiler)'%compiler) try: conf.load(compiler) except conf.errors.ConfigurationError ,e: conf.env.revert() conf.end_msg(False) debug('compiler_c: %r'%e) else: if conf.env['CC']: conf.end_msg(conf.env.get_flat('CC')) conf.env['COMPILER_CC']=compiler break conf.end_msg(False) else: conf.fatal('could not configure a c compiler!') def options(opt): opt.load_special_tools('c_*.py',ban=['c_dumbpreproc.py']) global c_compiler build_platform=Utils.unversioned_sys_platform() possible_compiler_list=c_compiler[build_platform in c_compiler and build_platform or'default'] test_for_compiler=' '.join(possible_compiler_list) cc_compiler_opts=opt.add_option_group("C Compiler Options") cc_compiler_opts.add_option('--check-c-compiler',default="%s"%test_for_compiler,help='On this platform (%s) the following C-Compiler will be checked by default: "%s"'%(build_platform,test_for_compiler),dest="check_c_compiler") for x in test_for_compiler.split(): opt.load('%s'%x)
agpl-3.0
jolyonb/edx-platform
common/djangoapps/microsite_configuration/tests/backends/test_filebased.py
1
5844
""" Test Microsite filebased backends. """ from __future__ import absolute_import import six import unittest from mock import patch from django.test import TestCase from django.conf import settings from django.urls import reverse from microsite_configuration.backends.base import ( BaseMicrositeBackend, BaseMicrositeTemplateBackend, ) from microsite_configuration import microsite from student.tests.factories import CourseEnrollmentFactory, UserFactory from xmodule.modulestore.tests.factories import CourseFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase @patch( 'microsite_configuration.microsite.BACKEND', microsite.get_backend( 'microsite_configuration.backends.filebased.FilebasedMicrositeBackend', BaseMicrositeBackend ) ) class FilebasedMicrositeBackendTests(TestCase): """ Go through and test the FilebasedMicrositeBackend class """ def setUp(self): super(FilebasedMicrositeBackendTests, self).setUp() self.microsite_subdomain = 'test-site' def tearDown(self): super(FilebasedMicrositeBackendTests, self).tearDown() microsite.clear() def test_get_value(self): """ Tests microsite.get_value works as expected. """ microsite.set_by_domain(self.microsite_subdomain) self.assertEqual(microsite.get_value('platform_name'), 'Test Site') def test_is_request_in_microsite(self): """ Tests microsite.is_request_in_microsite works as expected. """ microsite.set_by_domain(self.microsite_subdomain) self.assertTrue(microsite.is_request_in_microsite()) def test_has_override_value(self): """ Tests microsite.has_override_value works as expected. """ microsite.set_by_domain(self.microsite_subdomain) self.assertTrue(microsite.has_override_value('platform_name')) def test_get_value_for_org(self): """ Tests microsite.get_value_for_org works as expected. 
""" microsite.set_by_domain(self.microsite_subdomain) self.assertEqual( microsite.get_value_for_org('TestSiteX', 'platform_name'), 'Test Site' ) # if no config is set microsite.clear() with patch('django.conf.settings.MICROSITE_CONFIGURATION', False): self.assertEqual( microsite.get_value_for_org('TestSiteX', 'platform_name', 'Default Value'), 'Default Value' ) def test_get_all_orgs(self): """ Tests microsite.get_all_orgs works as expected. """ microsite.set_by_domain(self.microsite_subdomain) self.assertEqual( microsite.get_all_orgs(), set(['TestSiteX', 'LogistrationX']) ) # if no config is set microsite.clear() with patch('django.conf.settings.MICROSITE_CONFIGURATION', False): self.assertEqual( microsite.get_all_orgs(), set() ) def test_clear(self): """ Tests microsite.clear works as expected. """ microsite.set_by_domain(self.microsite_subdomain) self.assertEqual( microsite.get_value('platform_name'), 'Test Site' ) microsite.clear() self.assertIsNone(microsite.get_value('platform_name')) def test_get_all_configs(self): """ Tests microsite.get_all_config works as expected. """ microsite.set_by_domain(self.microsite_subdomain) configs = microsite.get_all_config() self.assertEqual(len(list(configs.keys())), 3) def test_set_config_by_domain(self): """ Tests microsite.set_config_by_domain works as expected. """ microsite.clear() # if microsite config does not exist default config should be used microsite.set_by_domain('unknown') self.assertEqual(microsite.get_value('university'), 'default_university') def test_has_configuration_set(self): """ Tests microsite.has_configuration_set works as expected. 
""" self.assertTrue(microsite.BACKEND.has_configuration_set()) with patch('django.conf.settings.MICROSITE_CONFIGURATION', {}): self.assertFalse(microsite.BACKEND.has_configuration_set()) @patch( 'microsite_configuration.microsite.TEMPLATES_BACKEND', microsite.get_backend( 'microsite_configuration.backends.filebased.FilebasedMicrositeTemplateBackend', BaseMicrositeTemplateBackend ) ) @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') class FilebasedMicrositeTemplateBackendTests(ModuleStoreTestCase): """ Go through and test the FilebasedMicrositeTemplateBackend class """ def setUp(self): super(FilebasedMicrositeTemplateBackendTests, self).setUp() self.microsite_subdomain = 'test-site' self.course = CourseFactory.create() self.user = UserFactory.create(username="Bob", email="bob@example.com", password="edx") self.client.login(username=self.user.username, password="edx") def test_get_template_path(self): """ Tests get template path works for both relative and absolute paths. """ microsite.set_by_domain(self.microsite_subdomain) CourseEnrollmentFactory( course_id=self.course.id, user=self.user ) response = self.client.get( reverse('syllabus', args=[six.text_type(self.course.id)]), HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME, ) self.assertContains(response, "Microsite relative path template contents") self.assertContains(response, "Microsite absolute path template contents")
agpl-3.0
barryrobison/arsenalsuite
cpp/lib/PyQt4/examples/widgets/styles.py
20
9203
#!/usr/bin/env python ############################################################################# ## ## Copyright (C) 2010 Riverbank Computing Limited. ## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). ## All rights reserved. ## ## This file is part of the examples of PyQt. ## ## $QT_BEGIN_LICENSE:BSD$ ## You may use this file under the terms of the BSD license as follows: ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are ## met: ## * Redistributions of source code must retain the above copyright ## notice, this list of conditions and the following disclaimer. ## * Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in ## the documentation and/or other materials provided with the ## distribution. ## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor ## the names of its contributors may be used to endorse or promote ## products derived from this software without specific prior written ## permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." 
## $QT_END_LICENSE$ ## ############################################################################# from PyQt4 import QtCore, QtGui class WidgetGallery(QtGui.QDialog): def __init__(self, parent=None): super(WidgetGallery, self).__init__(parent) self.originalPalette = QtGui.QApplication.palette() styleComboBox = QtGui.QComboBox() styleComboBox.addItems(QtGui.QStyleFactory.keys()) styleLabel = QtGui.QLabel("&Style:") styleLabel.setBuddy(styleComboBox) self.useStylePaletteCheckBox = QtGui.QCheckBox("&Use style's standard palette") self.useStylePaletteCheckBox.setChecked(True) disableWidgetsCheckBox = QtGui.QCheckBox("&Disable widgets") self.createTopLeftGroupBox() self.createTopRightGroupBox() self.createBottomLeftTabWidget() self.createBottomRightGroupBox() self.createProgressBar() styleComboBox.activated[str].connect(self.changeStyle) self.useStylePaletteCheckBox.toggled.connect(self.changePalette) disableWidgetsCheckBox.toggled.connect(self.topLeftGroupBox.setDisabled) disableWidgetsCheckBox.toggled.connect(self.topRightGroupBox.setDisabled) disableWidgetsCheckBox.toggled.connect(self.bottomLeftTabWidget.setDisabled) disableWidgetsCheckBox.toggled.connect(self.bottomRightGroupBox.setDisabled) topLayout = QtGui.QHBoxLayout() topLayout.addWidget(styleLabel) topLayout.addWidget(styleComboBox) topLayout.addStretch(1) topLayout.addWidget(self.useStylePaletteCheckBox) topLayout.addWidget(disableWidgetsCheckBox) mainLayout = QtGui.QGridLayout() mainLayout.addLayout(topLayout, 0, 0, 1, 2) mainLayout.addWidget(self.topLeftGroupBox, 1, 0) mainLayout.addWidget(self.topRightGroupBox, 1, 1) mainLayout.addWidget(self.bottomLeftTabWidget, 2, 0) mainLayout.addWidget(self.bottomRightGroupBox, 2, 1) mainLayout.addWidget(self.progressBar, 3, 0, 1, 2) mainLayout.setRowStretch(1, 1) mainLayout.setRowStretch(2, 1) mainLayout.setColumnStretch(0, 1) mainLayout.setColumnStretch(1, 1) self.setLayout(mainLayout) self.setWindowTitle("Styles") self.changeStyle('Windows') def changeStyle(self, 
styleName): QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(styleName)) self.changePalette() def changePalette(self): if (self.useStylePaletteCheckBox.isChecked()): QtGui.QApplication.setPalette(QtGui.QApplication.style().standardPalette()) else: QtGui.QApplication.setPalette(self.originalPalette) def advanceProgressBar(self): curVal = self.progressBar.value() maxVal = self.progressBar.maximum() self.progressBar.setValue(curVal + (maxVal - curVal) / 100) def createTopLeftGroupBox(self): self.topLeftGroupBox = QtGui.QGroupBox("Group 1") radioButton1 = QtGui.QRadioButton("Radio button 1") radioButton2 = QtGui.QRadioButton("Radio button 2") radioButton3 = QtGui.QRadioButton("Radio button 3") radioButton1.setChecked(True) checkBox = QtGui.QCheckBox("Tri-state check box") checkBox.setTristate(True) checkBox.setCheckState(QtCore.Qt.PartiallyChecked) layout = QtGui.QVBoxLayout() layout.addWidget(radioButton1) layout.addWidget(radioButton2) layout.addWidget(radioButton3) layout.addWidget(checkBox) layout.addStretch(1) self.topLeftGroupBox.setLayout(layout) def createTopRightGroupBox(self): self.topRightGroupBox = QtGui.QGroupBox("Group 2") defaultPushButton = QtGui.QPushButton("Default Push Button") defaultPushButton.setDefault(True) togglePushButton = QtGui.QPushButton("Toggle Push Button") togglePushButton.setCheckable(True) togglePushButton.setChecked(True) flatPushButton = QtGui.QPushButton("Flat Push Button") flatPushButton.setFlat(True) layout = QtGui.QVBoxLayout() layout.addWidget(defaultPushButton) layout.addWidget(togglePushButton) layout.addWidget(flatPushButton) layout.addStretch(1) self.topRightGroupBox.setLayout(layout) def createBottomLeftTabWidget(self): self.bottomLeftTabWidget = QtGui.QTabWidget() self.bottomLeftTabWidget.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Ignored) tab1 = QtGui.QWidget() tableWidget = QtGui.QTableWidget(10, 10) tab1hbox = QtGui.QHBoxLayout() tab1hbox.setMargin(5) tab1hbox.addWidget(tableWidget) 
tab1.setLayout(tab1hbox) tab2 = QtGui.QWidget() textEdit = QtGui.QTextEdit() textEdit.setPlainText("Twinkle, twinkle, little star,\n" "How I wonder what you are.\n" "Up above the world so high,\n" "Like a diamond in the sky.\n" "Twinkle, twinkle, little star,\n" "How I wonder what you are!\n") tab2hbox = QtGui.QHBoxLayout() tab2hbox.setMargin(5) tab2hbox.addWidget(textEdit) tab2.setLayout(tab2hbox) self.bottomLeftTabWidget.addTab(tab1, "&Table") self.bottomLeftTabWidget.addTab(tab2, "Text &Edit") def createBottomRightGroupBox(self): self.bottomRightGroupBox = QtGui.QGroupBox("Group 3") self.bottomRightGroupBox.setCheckable(True) self.bottomRightGroupBox.setChecked(True) lineEdit = QtGui.QLineEdit('s3cRe7') lineEdit.setEchoMode(QtGui.QLineEdit.Password) spinBox = QtGui.QSpinBox(self.bottomRightGroupBox) spinBox.setValue(50) dateTimeEdit = QtGui.QDateTimeEdit(self.bottomRightGroupBox) dateTimeEdit.setDateTime(QtCore.QDateTime.currentDateTime()) slider = QtGui.QSlider(QtCore.Qt.Horizontal, self.bottomRightGroupBox) slider.setValue(40) scrollBar = QtGui.QScrollBar(QtCore.Qt.Horizontal, self.bottomRightGroupBox) scrollBar.setValue(60) dial = QtGui.QDial(self.bottomRightGroupBox) dial.setValue(30) dial.setNotchesVisible(True) layout = QtGui.QGridLayout() layout.addWidget(lineEdit, 0, 0, 1, 2) layout.addWidget(spinBox, 1, 0, 1, 2) layout.addWidget(dateTimeEdit, 2, 0, 1, 2) layout.addWidget(slider, 3, 0) layout.addWidget(scrollBar, 4, 0) layout.addWidget(dial, 3, 1, 2, 1) layout.setRowStretch(5, 1) self.bottomRightGroupBox.setLayout(layout) def createProgressBar(self): self.progressBar = QtGui.QProgressBar() self.progressBar.setRange(0, 10000) self.progressBar.setValue(0) timer = QtCore.QTimer(self) timer.timeout.connect(self.advanceProgressBar) timer.start(1000) if __name__ == '__main__': import sys app = QtGui.QApplication(sys.argv) gallery = WidgetGallery() gallery.show() sys.exit(app.exec_())
gpl-2.0
18F/rdbms-subsetter
dialects/postgres.py
1
1790
import re import sqlalchemy as sa from sqlalchemy import cast from sqlalchemy.dialects.postgresql import ARRAY, ENUM def sql_enum_to_list(value): """ Interprets PostgreSQL's array syntax in terms of a list Enums come back from SQL as '{val1,val2,val3}' """ if value is None: return [] inner = re.match(r"^{(.*)}$", value).group(1) return inner.split(",") class ArrayOfEnum(ARRAY): """ Workaround for array-of-enum problem See http://docs.sqlalchemy.org/en/latest/dialects/postgresql.html#postgresql-array-of-enum """ def bind_expression(self, bindvalue): return cast(bindvalue, self) def result_processor(self, dialect, coltype): super_rp = super(ArrayOfEnum, self).result_processor(dialect, coltype) def process(value): # Convert array to Python objects return super_rp(sql_enum_to_list(value)) return process def fix_postgres_array_of_enum(connection, tbl): "Change type of ENUM[] columns to a custom type" for col in tbl.c: col_str = str(col.type) if col_str.endswith('[]'): # this is an array enum_name = col_str[:-2] try: # test if 'enum_name' is an enum enum_ranges = connection.execute(''' SELECT enum_range(NULL::%s); ''' % enum_name).fetchone() enum_values = sql_enum_to_list(enum_ranges[0]) enum = ENUM(*enum_values, name=enum_name) tbl.c[col.name].type = ArrayOfEnum(enum) except sa.exc.ProgrammingError as enum_excep: if 'does not exist' in str(enum_excep): pass # Must not have been an enum else: raise
cc0-1.0
iver333/phantomjs
src/qt/qtwebkit/Tools/QueueStatusServer/handlers/gc.py
146
2038
# Copyright (C) 2009 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from google.appengine.ext import webapp from model.queuestatus import QueueStatus class GC(webapp.RequestHandler): def get(self): statuses = QueueStatus.all().order("-date") seen_queues = set() for status in statuses: if status.active_patch_id or status.active_bug_id: continue if status.queue_name in seen_queues: status.delete() seen_queues.add(status.queue_name) self.response.out.write("Done!")
bsd-3-clause
wuhengzhi/chromium-crosswalk
tools/perf/measurements/v8_gc_times_unittest.py
14
16133
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from telemetry import decorators from telemetry.internal.results import page_test_results from telemetry.page import page as page_module from telemetry.testing import options_for_unittests from telemetry.testing import page_test_test_case from telemetry.timeline import model as model_module from telemetry.util import wpr_modes from measurements import v8_gc_times class V8GCTimesTestPageHelper(object): def __init__(self, page_set): self._page_set = page_set self._model = model_module.TimelineModel() self._renderer_process = self._model.GetOrCreateProcess(1) self._renderer_thread = self._renderer_process.GetOrCreateThread(2) self._renderer_thread.name = 'CrRendererMain' def AddEvent(self, category, name, thread_start, thread_duration, args=None, wall_start=None, wall_duration=None): wall_start = wall_start or thread_start wall_duration = wall_duration or thread_duration self._renderer_thread.BeginSlice(category, name, wall_start, thread_start, args=args) self._renderer_thread.EndSlice(wall_start + wall_duration, thread_start + thread_duration) class MockV8GCTimesPage(page_module.Page): def __init__(self, page_set): super(V8GCTimesTestPageHelper.MockV8GCTimesPage, self).__init__( 'file://blank.html', page_set, page_set.base_dir) def MeasureFakePage(self): # Create a fake page and add it to the page set. results = page_test_results.PageTestResults() page = V8GCTimesTestPageHelper.MockV8GCTimesPage(self._page_set) self._page_set.AddStory(page) # Pretend we're about to run the tests to silence lower level asserts. results.WillRunPage(page) v8_gc_times_metric = v8_gc_times.V8GCTimes() # pylint: disable=protected-access v8_gc_times_metric._renderer_process = self._renderer_process # Finalize the timeline import. 
self._model.FinalizeImport() # Measure the V8GCTimes metric and return the results # pylint: disable=protected-access v8_gc_times_metric._AddV8MetricsToResults(self._renderer_process, results) results.DidRunPage(page) return results class V8GCTimesTests(page_test_test_case.PageTestTestCase): def setUp(self): self._options = options_for_unittests.GetCopy() self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF def testWithNoTraceEvents(self): test_page_helper = V8GCTimesTestPageHelper( self.CreateEmptyPageSet()) results = test_page_helper.MeasureFakePage() self._AssertResultsEqual(_GetEmptyResults(), _ActualValues(results)) def testWithNoGarbageCollectionEvents(self): test_page_helper = V8GCTimesTestPageHelper( self.CreateEmptyPageSet()) test_page_helper.AddEvent( 'toplevel', 'PostMessage', thread_start=0, thread_duration=14, wall_start=5, wall_duration=35) results = test_page_helper.MeasureFakePage() expected = _GetEmptyResults() expected['duration'] = ('ms', 35) expected['cpu_time'] = ('ms', 14) self._AssertResultsEqual(expected, _ActualValues(results)) def testWithGarbageCollectionEvents(self): test_page_helper = V8GCTimesTestPageHelper( self.CreateEmptyPageSet()) test_page_helper.AddEvent( 'toplevel', 'PostMessage', thread_start=0, thread_duration=77, wall_start=5, wall_duration=88) test_page_helper.AddEvent('v8', 'V8.GCScavenger', 5, 4) test_page_helper.AddEvent('v8', 'V8.GCScavenger', 15, 3) test_page_helper.AddEvent('v8', 'V8.GCIncrementalMarking', 23, 4) test_page_helper.AddEvent('v8', 'V8.GCIncrementalMarking', 34, 2) test_page_helper.AddEvent('v8', 'V8.GCFinalizeMC', 38, 2) test_page_helper.AddEvent('v8', 'V8.GCFinalizeMC', 42, 3) test_page_helper.AddEvent('v8', 'V8.GCFinalizeMCReduceMemory', 46, 4) test_page_helper.AddEvent('v8', 'V8.GCFinalizeMCReduceMemory', 51, 5) test_page_helper.AddEvent('v8', 'V8.GCCompactor', 62, 4) test_page_helper.AddEvent('v8', 'V8.GCCompactor', 72, 5) results = test_page_helper.MeasureFakePage() expected = 
_GetEmptyResults() expected['duration'] = ('ms', 88) expected['cpu_time'] = ('ms', 77) expected['v8_gc_incremental_marking'] = ('ms', 6.0) expected['v8_gc_incremental_marking_average'] = ('ms', 3.0) expected['v8_gc_incremental_marking_count'] = ('count', 2) expected['v8_gc_incremental_marking_max'] = ('ms', 4.0) expected['v8_gc_incremental_marking_outside_idle'] = ('ms', 6.0) expected['v8_gc_finalize_incremental'] = ('ms', 5.0) expected['v8_gc_finalize_incremental_average'] = ('ms', 2.5) expected['v8_gc_finalize_incremental_count'] = ('count', 2) expected['v8_gc_finalize_incremental_max'] = ('ms', 3.0) expected['v8_gc_finalize_incremental_outside_idle'] = ('ms', 5.0) expected['v8_gc_finalize_incremental_reduce_memory'] = ('ms', 9.0) expected['v8_gc_finalize_incremental_reduce_memory_average'] = ('ms', 4.5) expected['v8_gc_finalize_incremental_reduce_memory_count'] = ('count', 2) expected['v8_gc_finalize_incremental_reduce_memory_max'] = ('ms', 5.0) expected['v8_gc_finalize_incremental_reduce_memory_outside_idle'] = ( 'ms', 9.0) expected['v8_gc_scavenger'] = ('ms', 7.0) expected['v8_gc_scavenger_average'] = ('ms', 3.5) expected['v8_gc_scavenger_count'] = ('count', 2) expected['v8_gc_scavenger_max'] = ('ms', 4.0) expected['v8_gc_scavenger_outside_idle'] = ('ms', 7.0) expected['v8_gc_mark_compactor'] = ('ms', 9.0) expected['v8_gc_mark_compactor_average'] = ('ms', 4.5) expected['v8_gc_mark_compactor_count'] = ('count', 2) expected['v8_gc_mark_compactor_max'] = ('ms', 5.0) expected['v8_gc_mark_compactor_outside_idle'] = ('ms', 9.0) expected['v8_gc_total'] = ('ms', 36.0) expected['v8_gc_total_outside_idle'] = ('ms', 36.0) self._AssertResultsEqual(expected, _ActualValues(results)) def testWithIdleTaskGarbageCollectionEvents(self): test_page_helper = V8GCTimesTestPageHelper( self.CreateEmptyPageSet()) test_page_helper.AddEvent( 'toplevel', 'PostMessage', thread_start=0, thread_duration=57, wall_start=5, wall_duration=68) test_page_helper.AddEvent('v8', 'V8.GCScavenger', 5, 
4) test_page_helper.AddEvent( 'renderer.scheduler', 'SingleThreadIdleTaskRunner::RunTask', 15, 4, {'allotted_time_ms': 12}) test_page_helper.AddEvent('v8', 'V8.GCScavenger', 15, 3) test_page_helper.AddEvent('v8', 'V8.GCIncrementalMarking', 23, 4) test_page_helper.AddEvent( 'renderer.scheduler', 'SingleThreadIdleTaskRunner::RunTask', 34, 3, {'allotted_time_ms': 12}) test_page_helper.AddEvent('v8', 'V8.GCIncrementalMarking', 34, 2) test_page_helper.AddEvent('v8', 'V8.GCCompactor', 42, 4) test_page_helper.AddEvent( 'renderer.scheduler', 'SingleThreadIdleTaskRunner::RunTask', 52, 6, {'allotted_time_ms': 12}) test_page_helper.AddEvent('v8', 'V8.GCCompactor', 52, 5) results = test_page_helper.MeasureFakePage() expected = _GetEmptyResults() expected['duration'] = ('ms', 68) expected['cpu_time'] = ('ms', 57) expected['v8_gc_incremental_marking'] = ('ms', 6.0) expected['v8_gc_incremental_marking_average'] = ('ms', 3.0) expected['v8_gc_incremental_marking_count'] = ('count', 2) expected['v8_gc_incremental_marking_max'] = ('ms', 4.0) expected['v8_gc_incremental_marking_outside_idle'] = ('ms', 4.0) expected['v8_gc_incremental_marking_percentage_idle'] = \ ('idle%', 100 * 2 / 6.0) expected['v8_gc_scavenger'] = ('ms', 7.0) expected['v8_gc_scavenger_average'] = ('ms', 3.5) expected['v8_gc_scavenger_count'] = ('count', 2) expected['v8_gc_scavenger_max'] = ('ms', 4.0) expected['v8_gc_scavenger_outside_idle'] = ('ms', 4.0) expected['v8_gc_scavenger_percentage_idle'] = ('idle%', 100 * 3 / 7.0) expected['v8_gc_mark_compactor'] = ('ms', 9.0) expected['v8_gc_mark_compactor_average'] = ('ms', 4.5) expected['v8_gc_mark_compactor_count'] = ('count', 2) expected['v8_gc_mark_compactor_max'] = ('ms', 5.0) expected['v8_gc_mark_compactor_outside_idle'] = ('ms', 4.0) expected['v8_gc_mark_compactor_percentage_idle'] = ('idle%', 100 * 5 / 9.0) expected['v8_gc_total'] = ('ms', 22.0) expected['v8_gc_total_outside_idle'] = ('ms', 12.0) expected['v8_gc_total_percentage_idle'] = ('idle%', 100 * 10 / 
22.0) self._AssertResultsEqual(expected, _ActualValues(results)) def testWithIdleTaskOverruns(self): test_page_helper = V8GCTimesTestPageHelper( self.CreateEmptyPageSet()) test_page_helper.AddEvent( 'toplevel', 'PostMessage', thread_start=0, thread_duration=80, wall_start=5, wall_duration=92) test_page_helper.AddEvent( 'renderer.scheduler', 'SingleThreadIdleTaskRunner::RunTask', 15, 15, {'allotted_time_ms': 8}) test_page_helper.AddEvent('v8', 'V8.GCScavenger', 15, 14) test_page_helper.AddEvent( 'renderer.scheduler', 'SingleThreadIdleTaskRunner::RunTask', 34, 15, {'allotted_time_ms': 6}) test_page_helper.AddEvent('v8', 'V8.GCIncrementalMarking', 34, 14) test_page_helper.AddEvent( 'renderer.scheduler', 'SingleThreadIdleTaskRunner::RunTask', 52, 23, {'allotted_time_ms': 9}) test_page_helper.AddEvent('v8', 'V8.GCCompactor', 52, 22) results = test_page_helper.MeasureFakePage() expected = _GetEmptyResults() expected['duration'] = ('ms', 92) expected['cpu_time'] = ('ms', 80) expected['v8_gc_incremental_marking'] = ('ms', 14.0) expected['v8_gc_incremental_marking_average'] = ('ms', 14.0) expected['v8_gc_incremental_marking_count'] = ('count', 1) expected['v8_gc_incremental_marking_max'] = ('ms', 14.0) expected['v8_gc_incremental_marking_outside_idle'] = ('ms', 8.0) expected['v8_gc_incremental_marking_idle_deadline_overrun'] = ('ms', 8.0) expected['v8_gc_incremental_marking_percentage_idle'] = \ ('idle%', 100 * 6 / 14.0) expected['v8_gc_scavenger'] = ('ms', 14.0) expected['v8_gc_scavenger_average'] = ('ms', 14.0) expected['v8_gc_scavenger_count'] = ('count', 1) expected['v8_gc_scavenger_max'] = ('ms', 14.0) expected['v8_gc_scavenger_outside_idle'] = ('ms', 6.0) expected['v8_gc_scavenger_idle_deadline_overrun'] = ('ms', 6.0) expected['v8_gc_scavenger_percentage_idle'] = ('idle%', 100 * 8 / 14.0) expected['v8_gc_mark_compactor'] = ('ms', 22.0) expected['v8_gc_mark_compactor_average'] = ('ms', 22.0) expected['v8_gc_mark_compactor_count'] = ('count', 1) 
expected['v8_gc_mark_compactor_max'] = ('ms', 22.0) expected['v8_gc_mark_compactor_outside_idle'] = ('ms', 13.0) expected['v8_gc_mark_compactor_idle_deadline_overrun'] = ('ms', 13.0) expected['v8_gc_mark_compactor_percentage_idle'] = ('idle%', 100 * 9 / 22.0) expected['v8_gc_total'] = ('ms', 50.0) expected['v8_gc_total_outside_idle'] = ('ms', 27.0) expected['v8_gc_total_idle_deadline_overrun'] = ('ms', 27.0) expected['v8_gc_total_percentage_idle'] = ('idle%', 100 * 23 / 50.0) self._AssertResultsEqual(expected, _ActualValues(results)) def testWithIdleTaskWallDurationOverruns(self): test_page_helper = V8GCTimesTestPageHelper( self.CreateEmptyPageSet()) test_page_helper.AddEvent( 'toplevel', 'PostMessage', thread_start=0, thread_duration=80, wall_start=5, wall_duration=92) test_page_helper.AddEvent( 'renderer.scheduler', 'SingleThreadIdleTaskRunner::RunTask', 15, 15, {'allotted_time_ms': 8}) test_page_helper.AddEvent( 'v8', 'V8.GCScavenger', thread_start=15, thread_duration=4, wall_start=15, wall_duration=14) results = test_page_helper.MeasureFakePage() expected = _GetEmptyResults() expected['duration'] = ('ms', 92) expected['cpu_time'] = ('ms', 80) expected['v8_gc_scavenger'] = ('ms', 4.0) expected['v8_gc_scavenger_average'] = ('ms', 4.0) expected['v8_gc_scavenger_count'] = ('count', 1) expected['v8_gc_scavenger_max'] = ('ms', 4.0) expected_outside_idle = 4.0 - (4.0 * 8 / 14) expected['v8_gc_scavenger_outside_idle'] = ('ms', expected_outside_idle) expected['v8_gc_scavenger_idle_deadline_overrun'] = ('ms', 6.0) expected['v8_gc_scavenger_percentage_idle'] = \ ('idle%', 100 * (4.0 - expected_outside_idle) / 4.0) expected['v8_gc_total'] = expected['v8_gc_scavenger'] expected['v8_gc_total_outside_idle'] = \ expected['v8_gc_scavenger_outside_idle'] expected['v8_gc_total_idle_deadline_overrun'] = \ expected['v8_gc_scavenger_idle_deadline_overrun'] expected['v8_gc_total_percentage_idle'] = \ expected['v8_gc_scavenger_percentage_idle'] self._AssertResultsEqual(expected, 
_ActualValues(results)) def _AssertResultsEqual(self, expected, actual): for key in expected.iterkeys(): self.assertIn(key, actual.keys()) self.assertEqual(expected[key], actual[key], 'Result for [' + key + '] - expected ' + str(expected[key]) + ' but got ' + str(actual[key])) @decorators.Disabled('win') # crbug.com/416502 def testCleanUpTrace(self): self.TestTracingCleanedUp(v8_gc_times.V8GCTimes, self._options) def _ActualValues(results): return dict(list( (v.name, (v.units, v.value)) for v in results.all_page_specific_values )) def _GetEmptyResults(): return {'cpu_time': ('ms', 0.0), 'duration': ('ms', 0.0), 'v8_gc_incremental_marking': ('ms', 0.0), 'v8_gc_incremental_marking_average': ('ms', 0.0), 'v8_gc_incremental_marking_count': ('count', 0), 'v8_gc_incremental_marking_max': ('ms', 0.0), 'v8_gc_incremental_marking_idle_deadline_overrun': ('ms', 0.0), 'v8_gc_incremental_marking_outside_idle': ('ms', 0.0), 'v8_gc_incremental_marking_percentage_idle': ('idle%', 0.0), 'v8_gc_finalize_incremental': ('ms', 0.0), 'v8_gc_finalize_incremental_average': ('ms', 0.0), 'v8_gc_finalize_incremental_count': ('count', 0), 'v8_gc_finalize_incremental_max': ('ms', 0.0), 'v8_gc_finalize_incremental_idle_deadline_overrun': ('ms', 0.0), 'v8_gc_finalize_incremental_outside_idle': ('ms', 0.0), 'v8_gc_finalize_incremental_percentage_idle': ('idle%', 0.0), 'v8_gc_finalize_incremental_reduce_memory': ('ms', 0.0), 'v8_gc_finalize_incremental_reduce_memory_average': ('ms', 0.0), 'v8_gc_finalize_incremental_reduce_memory_count': ('count', 0), 'v8_gc_finalize_incremental_reduce_memory_max': ('ms', 0.0), 'v8_gc_finalize_incremental_reduce_memory_idle_deadline_overrun': ('ms', 0.0), 'v8_gc_finalize_incremental_reduce_memory_outside_idle': ('ms', 0.0), 'v8_gc_finalize_incremental_reduce_memory_percentage_idle': ('idle%', 0.0), 'v8_gc_mark_compactor': ('ms', 0.0), 'v8_gc_mark_compactor_average': ('ms', 0.0), 'v8_gc_mark_compactor_count': ('count', 0), 'v8_gc_mark_compactor_max': ('ms', 0.0), 
'v8_gc_mark_compactor_idle_deadline_overrun': ('ms', 0.0), 'v8_gc_mark_compactor_outside_idle': ('ms', 0.0), 'v8_gc_mark_compactor_percentage_idle': ('idle%', 0.0), 'v8_gc_scavenger': ('ms', 0.0), 'v8_gc_scavenger_average': ('ms', 0.0), 'v8_gc_scavenger_count': ('count', 0), 'v8_gc_scavenger_max': ('ms', 0.0), 'v8_gc_scavenger_idle_deadline_overrun': ('ms', 0.0), 'v8_gc_scavenger_outside_idle': ('ms', 0.0), 'v8_gc_scavenger_percentage_idle': ('idle%', 0.0), 'v8_gc_total': ('ms', 0.0), 'v8_gc_total_idle_deadline_overrun': ('ms', 0.0), 'v8_gc_total_outside_idle': ('ms', 0.0)}
bsd-3-clause
StefanRijnhart/OpenUpgrade
addons/sale/edi/__init__.py
454
1065
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2011 OpenERP S.A. <http://openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import sale_order # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
looker/sentry
src/sentry/south_migrations/0182_auto__add_field_auditlogentry_actor_label__add_field_auditlogentry_act.py
4
56512
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'AuditLogEntry.actor_label' db.add_column( 'sentry_auditlogentry', 'actor_label', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True), keep_default=False ) # Adding field 'AuditLogEntry.actor_key' db.add_column( 'sentry_auditlogentry', 'actor_key', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')( to=orm['sentry.ApiKey'], null=True, blank=True ), keep_default=False ) # Changing field 'AuditLogEntry.actor' db.alter_column( 'sentry_auditlogentry', 'actor_id', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')( null=True, to=orm['sentry.User'] ) ) def backwards(self, orm): # Deleting field 'AuditLogEntry.actor_label' db.delete_column('sentry_auditlogentry', 'actor_label') # Deleting field 'AuditLogEntry.actor_key' db.delete_column('sentry_auditlogentry', 'actor_key_id') # User chose to not deal with backwards NULL issues for 'AuditLogEntry.actor' raise RuntimeError( "Cannot reverse this migration. 'AuditLogEntry.actor' and its values cannot be restored." 
) # The following code is provided here to aid in writing a correct migration # Changing field 'AuditLogEntry.actor' db.alter_column( 'sentry_auditlogentry', 'actor_id', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.User']) ) models = { 'sentry.accessgroup': { 'Meta': { 'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup' }, 'data': ( 'sentry.db.models.fields.gzippeddict.GzippedDictField', [], { 'null': 'True', 'blank': 'True' } ), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'managed': ('django.db.models.fields.BooleanField', [], { 'default': 'False' }), 'members': ( 'django.db.models.fields.related.ManyToManyField', [], { 'to': "orm['sentry.User']", 'symmetrical': 'False' } ), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'projects': ( 'django.db.models.fields.related.ManyToManyField', [], { 'to': "orm['sentry.Project']", 'symmetrical': 'False' } ), 'team': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Team']" } ), 'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], { 'default': '50' }) }, 'sentry.activity': { 'Meta': { 'object_name': 'Activity' }, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], { 'null': 'True' }), 'datetime': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'event': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Event']", 'null': 'True' } ), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']", 'null': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'ident': ('django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True' }), 'project': ( 
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']", 'null': 'True' } ) }, 'sentry.alert': { 'Meta': { 'object_name': 'Alert' }, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], { 'null': 'True' }), 'datetime': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']", 'null': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'message': ('django.db.models.fields.TextField', [], {}), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'related_groups': ( 'django.db.models.fields.related.ManyToManyField', [], { 'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']" } ), 'status': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0', 'db_index': 'True' } ) }, 'sentry.alertrelatedgroup': { 'Meta': { 'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup' }, 'alert': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Alert']" } ), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], { 'null': 'True' }), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }) }, 'sentry.apikey': { 'Meta': { 'object_name': 'ApiKey' }, 'allowed_origins': ('django.db.models.fields.TextField', [], { 'null': 'True', 'blank': 'True' }), 'date_added': 
('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'unique': 'True', 'max_length': '32' }), 'label': ( 'django.db.models.fields.CharField', [], { 'default': "'Default'", 'max_length': '64', 'blank': 'True' } ), 'organization': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'key_set'", 'to': "orm['sentry.Organization']" } ), 'scopes': ('django.db.models.fields.BigIntegerField', [], { 'default': 'None' }), 'status': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0', 'db_index': 'True' } ) }, 'sentry.auditlogentry': { 'Meta': { 'object_name': 'AuditLogEntry' }, 'actor': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']" } ), 'actor_key': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True' } ), 'actor_label': ( 'django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True', 'blank': 'True' } ), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}), 'datetime': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'ip_address': ( 'django.db.models.fields.GenericIPAddressField', [], { 'max_length': '39', 'null': 'True' } ), 'organization': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Organization']" } ), 'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'null': 'True' }), 'target_user': ( 
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']" } ) }, 'sentry.authidentity': { 'Meta': { 'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity' }, 'auth_provider': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.AuthProvider']" } ), 'data': ('jsonfield.fields.JSONField', [], { 'default': '{}' }), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'ident': ('django.db.models.fields.CharField', [], { 'max_length': '128' }), 'last_synced': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'last_verified': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']" } ) }, 'sentry.authprovider': { 'Meta': { 'object_name': 'AuthProvider' }, 'config': ('jsonfield.fields.JSONField', [], { 'default': '{}' }), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'default_global_access': ('django.db.models.fields.BooleanField', [], { 'default': 'True' }), 'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '50' }), 'default_teams': ( 'django.db.models.fields.related.ManyToManyField', [], { 'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True' } ), 'flags': ('django.db.models.fields.BigIntegerField', [], { 'default': '0' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'last_sync': ('django.db.models.fields.DateTimeField', [], { 'null': 'True' }), 'organization': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], 
{ 'to': "orm['sentry.Organization']", 'unique': 'True' } ), 'provider': ('django.db.models.fields.CharField', [], { 'max_length': '128' }), 'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'null': 'True' }) }, 'sentry.broadcast': { 'Meta': { 'object_name': 'Broadcast' }, 'badge': ( 'django.db.models.fields.CharField', [], { 'max_length': '32', 'null': 'True', 'blank': 'True' } ), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'is_active': ('django.db.models.fields.BooleanField', [], { 'default': 'True', 'db_index': 'True' }), 'link': ( 'django.db.models.fields.URLField', [], { 'max_length': '200', 'null': 'True', 'blank': 'True' } ), 'message': ('django.db.models.fields.CharField', [], { 'max_length': '256' }) }, 'sentry.event': { 'Meta': { 'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)" }, 'checksum': ('django.db.models.fields.CharField', [], { 'max_length': '32', 'db_index': 'True' }), 'data': ('sentry.db.models.fields.node.NodeField', [], { 'null': 'True', 'blank': 'True' }), 'datetime': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'event_id': ( 'django.db.models.fields.CharField', [], { 'max_length': '32', 'null': 'True', 'db_column': "'message_id'" } ), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'message': ('django.db.models.fields.TextField', [], {}), 'num_comments': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0', 'null': 'True' } ), 'platform': 
('django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], { 'null': 'True' }) }, 'sentry.eventmapping': { 'Meta': { 'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'event_id': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ) }, 'sentry.file': { 'Meta': { 'object_name': 'File' }, 'checksum': ('django.db.models.fields.CharField', [], { 'max_length': '40', 'null': 'True' }), 'headers': ('jsonfield.fields.JSONField', [], { 'default': '{}' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '128' }), 'path': ('django.db.models.fields.TextField', [], { 'null': 'True' }), 'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'null': 'True' }), 'storage': ('django.db.models.fields.CharField', [], { 'max_length': '128', 'null': 'True' }), 'storage_options': ('jsonfield.fields.JSONField', [], { 'default': '{}' }), 'timestamp': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'type': ('django.db.models.fields.CharField', [], { 'max_length': '64' }) }, 'sentry.group': { 'Meta': { 'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'" }, 'active_at': 
('django.db.models.fields.DateTimeField', [], { 'null': 'True', 'db_index': 'True' }), 'checksum': ('django.db.models.fields.CharField', [], { 'max_length': '32', 'db_index': 'True' }), 'culprit': ( 'django.db.models.fields.CharField', [], { 'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True' } ), 'data': ( 'sentry.db.models.fields.gzippeddict.GzippedDictField', [], { 'null': 'True', 'blank': 'True' } ), 'first_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'is_public': ( 'django.db.models.fields.NullBooleanField', [], { 'default': 'False', 'null': 'True', 'blank': 'True' } ), 'last_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'level': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '40', 'db_index': 'True', 'blank': 'True' } ), 'logger': ( 'django.db.models.fields.CharField', [], { 'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True' } ), 'message': ('django.db.models.fields.TextField', [], {}), 'num_comments': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0', 'null': 'True' } ), 'platform': ('django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'resolved_at': ('django.db.models.fields.DateTimeField', [], { 'null': 'True', 'db_index': 'True' }), 'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], { 'default': '0' }), 'status': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0', 'db_index': 'True' } ), 'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], { 'default': '0' }), 
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], { 'default': '0' }), 'times_seen': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '1', 'db_index': 'True' } ) }, 'sentry.groupassignee': { 'Meta': { 'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'" }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'assignee_set'", 'to': "orm['sentry.Project']" } ), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']" } ) }, 'sentry.groupbookmark': { 'Meta': { 'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark' }, 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']" } ), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']" } ) }, 'sentry.grouphash': { 'Meta': { 'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash' }, 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']", 'null': 'True' } ), 'hash': ('django.db.models.fields.CharField', [], { 'max_length': '32', 'db_index': 'True' }), 'id': 
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ) }, 'sentry.groupmeta': { 'Meta': { 'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta' }, 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'value': ('django.db.models.fields.TextField', [], {}) }, 'sentry.grouprulestatus': { 'Meta': { 'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'last_active': ('django.db.models.fields.DateTimeField', [], { 'null': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'rule': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Rule']" } ), 'status': ('django.db.models.fields.PositiveSmallIntegerField', [], { 'default': '0' }) }, 'sentry.groupseen': { 'Meta': { 'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen' }, 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'last_seen': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'user': ( 
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']", 'db_index': 'False' } ) }, 'sentry.grouptagkey': { 'Meta': { 'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey' }, 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }) }, 'sentry.grouptagvalue': { 'Meta': { 'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'" }, 'first_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True' } ), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'grouptag'", 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'last_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True' } ), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']" } ), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }), 'value': ('django.db.models.fields.CharField', [], { 'max_length': '200' }) }, 'sentry.helppage': { 'Meta': { 'object_name': 'HelpPage' }, 'content': ('django.db.models.fields.TextField', [], {}), 'date_added': 
('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'is_visible': ('django.db.models.fields.BooleanField', [], { 'default': 'True' }), 'key': ( 'django.db.models.fields.CharField', [], { 'max_length': '64', 'unique': 'True', 'null': 'True' } ), 'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '50' }), 'title': ('django.db.models.fields.CharField', [], { 'max_length': '64' }) }, 'sentry.lostpasswordhash': { 'Meta': { 'object_name': 'LostPasswordHash' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'hash': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']", 'unique': 'True' } ) }, 'sentry.option': { 'Meta': { 'object_name': 'Option' }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'unique': 'True', 'max_length': '64' }), 'last_updated': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) }, 'sentry.organization': { 'Meta': { 'object_name': 'Organization' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'flags': ('django.db.models.fields.BigIntegerField', [], { 'default': '0' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'members': ( 'django.db.models.fields.related.ManyToManyField', [], { 'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']" } 
), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'owner': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']" } ), 'slug': ('django.db.models.fields.SlugField', [], { 'unique': 'True', 'max_length': '50' }), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }) }, 'sentry.organizationaccessrequest': { 'Meta': { 'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest' }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'member': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.OrganizationMember']" } ), 'team': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Team']" } ) }, 'sentry.organizationmember': { 'Meta': { 'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember' }, 'counter': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'null': 'True', 'blank': 'True' } ), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'email': ( 'django.db.models.fields.EmailField', [], { 'max_length': '75', 'null': 'True', 'blank': 'True' } ), 'flags': ('django.db.models.fields.BigIntegerField', [], { 'default': '0' }), 'has_global_access': ('django.db.models.fields.BooleanField', [], { 'default': 'True' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'organization': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'member_set'", 'to': "orm['sentry.Organization']" } ), 'teams': ( 'django.db.models.fields.related.ManyToManyField', [], { 'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True' } ), 'type': 
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '50' }), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']" } ) }, 'sentry.organizationmemberteam': { 'Meta': { 'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'" }, 'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], { 'primary_key': 'True' }), 'is_active': ('django.db.models.fields.BooleanField', [], { 'default': 'True' }), 'organizationmember': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.OrganizationMember']" } ), 'team': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Team']" } ) }, 'sentry.project': { 'Meta': { 'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '200' }), 'organization': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Organization']" } ), 'platform': ('django.db.models.fields.CharField', [], { 'max_length': '32', 'null': 'True' }), 'public': ('django.db.models.fields.BooleanField', [], { 'default': 'False' }), 'slug': ('django.db.models.fields.SlugField', [], { 'max_length': '50', 'null': 'True' }), 'status': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0', 'db_index': 'True' } ), 'team': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Team']" } ) }, 'sentry.projectkey': { 'Meta': { 'object_name': 'ProjectKey' }, 'date_added': ( 
'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'label': ( 'django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True', 'blank': 'True' } ), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'key_set'", 'to': "orm['sentry.Project']" } ), 'public_key': ( 'django.db.models.fields.CharField', [], { 'max_length': '32', 'unique': 'True', 'null': 'True' } ), 'roles': ('django.db.models.fields.BigIntegerField', [], { 'default': '1' }), 'secret_key': ( 'django.db.models.fields.CharField', [], { 'max_length': '32', 'unique': 'True', 'null': 'True' } ), 'status': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0', 'db_index': 'True' } ) }, 'sentry.projectoption': { 'Meta': { 'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'" }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) }, 'sentry.release': { 'Meta': { 'unique_together': "(('project', 'version'),)", 'object_name': 'Release' }, 'data': ('jsonfield.fields.JSONField', [], { 'default': '{}' }), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'date_released': ('django.db.models.fields.DateTimeField', [], { 'null': 'True', 'blank': 'True' }), 'date_started': ('django.db.models.fields.DateTimeField', [], { 'null': 'True', 'blank': 'True' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ( 
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'ref': ( 'django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True', 'blank': 'True' } ), 'url': ( 'django.db.models.fields.URLField', [], { 'max_length': '200', 'null': 'True', 'blank': 'True' } ), 'version': ('django.db.models.fields.CharField', [], { 'max_length': '64' }) }, 'sentry.releasefile': { 'Meta': { 'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile' }, 'file': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.File']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'ident': ('django.db.models.fields.CharField', [], { 'max_length': '40' }), 'name': ('django.db.models.fields.TextField', [], {}), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'release': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Release']" } ) }, 'sentry.rule': { 'Meta': { 'object_name': 'Rule' }, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'label': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ) }, 'sentry.tagkey': { 'Meta': { 'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'" }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'label': ('django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True' }), 'project': ( 
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }), 'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }) }, 'sentry.tagvalue': { 'Meta': { 'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'" }, 'data': ( 'sentry.db.models.fields.gzippeddict.GzippedDictField', [], { 'null': 'True', 'blank': 'True' } ), 'first_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'last_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True' } ), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }), 'value': ('django.db.models.fields.CharField', [], { 'max_length': '200' }) }, 'sentry.team': { 'Meta': { 'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team' }, 'date_added': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'organization': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Organization']" } ), 'slug': ('django.db.models.fields.SlugField', [], { 'max_length': '50' }), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }) 
}, 'sentry.user': { 'Meta': { 'object_name': 'User', 'db_table': "'auth_user'" }, 'date_joined': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'email': ('django.db.models.fields.EmailField', [], { 'max_length': '75', 'blank': 'True' }), 'first_name': ('django.db.models.fields.CharField', [], { 'max_length': '30', 'blank': 'True' }), 'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], { 'primary_key': 'True' }), 'is_active': ('django.db.models.fields.BooleanField', [], { 'default': 'True' }), 'is_managed': ('django.db.models.fields.BooleanField', [], { 'default': 'False' }), 'is_staff': ('django.db.models.fields.BooleanField', [], { 'default': 'False' }), 'is_superuser': ('django.db.models.fields.BooleanField', [], { 'default': 'False' }), 'last_login': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'last_name': ('django.db.models.fields.CharField', [], { 'max_length': '30', 'blank': 'True' }), 'password': ('django.db.models.fields.CharField', [], { 'max_length': '128' }), 'username': ('django.db.models.fields.CharField', [], { 'unique': 'True', 'max_length': '128' }) }, 'sentry.useroption': { 'Meta': { 'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption' }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']" } ), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) } } complete_apps = ['sentry']
bsd-3-clause
jumpstarter-io/keystone
keystone/tests/unit/common/test_notifications.py
2
44394
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import uuid import mock from oslo_config import cfg from oslo_config import fixture as config_fixture from oslotest import mockpatch from pycadf import cadftaxonomy from pycadf import cadftype from pycadf import eventfactory from pycadf import resource as cadfresource from keystone.common import dependency from keystone import notifications from keystone.tests import unit from keystone.tests.unit import test_v3 CONF = cfg.CONF EXP_RESOURCE_TYPE = uuid.uuid4().hex CREATED_OPERATION = notifications.ACTIONS.created UPDATED_OPERATION = notifications.ACTIONS.updated DELETED_OPERATION = notifications.ACTIONS.deleted DISABLED_OPERATION = notifications.ACTIONS.disabled class ArbitraryException(Exception): pass def register_callback(operation, resource_type=EXP_RESOURCE_TYPE): """Helper for creating and registering a mock callback. 
""" callback = mock.Mock(__name__='callback', im_class=mock.Mock(__name__='class')) notifications.register_event_callback(operation, resource_type, callback) return callback class AuditNotificationsTestCase(unit.BaseTestCase): def setUp(self): super(AuditNotificationsTestCase, self).setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) self.addCleanup(notifications.clear_subscribers) def _test_notification_operation(self, notify_function, operation): exp_resource_id = uuid.uuid4().hex callback = register_callback(operation) notify_function(EXP_RESOURCE_TYPE, exp_resource_id) callback.assert_called_once_with('identity', EXP_RESOURCE_TYPE, operation, {'resource_info': exp_resource_id}) self.config_fixture.config(notification_format='cadf') with mock.patch( 'keystone.notifications._create_cadf_payload') as cadf_notify: notify_function(EXP_RESOURCE_TYPE, exp_resource_id) initiator = None cadf_notify.assert_called_once_with( operation, EXP_RESOURCE_TYPE, exp_resource_id, notifications.taxonomy.OUTCOME_SUCCESS, initiator) notify_function(EXP_RESOURCE_TYPE, exp_resource_id, public=False) cadf_notify.assert_called_once_with( operation, EXP_RESOURCE_TYPE, exp_resource_id, notifications.taxonomy.OUTCOME_SUCCESS, initiator) def test_resource_created_notification(self): self._test_notification_operation(notifications.Audit.created, CREATED_OPERATION) def test_resource_updated_notification(self): self._test_notification_operation(notifications.Audit.updated, UPDATED_OPERATION) def test_resource_deleted_notification(self): self._test_notification_operation(notifications.Audit.deleted, DELETED_OPERATION) def test_resource_disabled_notification(self): self._test_notification_operation(notifications.Audit.disabled, DISABLED_OPERATION) class NotificationsWrapperTestCase(unit.BaseTestCase): def create_fake_ref(self): resource_id = uuid.uuid4().hex return resource_id, { 'id': resource_id, 'key': uuid.uuid4().hex } @notifications.created(EXP_RESOURCE_TYPE) def 
create_resource(self, resource_id, data): return data def test_resource_created_notification(self): exp_resource_id, data = self.create_fake_ref() callback = register_callback(CREATED_OPERATION) self.create_resource(exp_resource_id, data) callback.assert_called_with('identity', EXP_RESOURCE_TYPE, CREATED_OPERATION, {'resource_info': exp_resource_id}) @notifications.updated(EXP_RESOURCE_TYPE) def update_resource(self, resource_id, data): return data def test_resource_updated_notification(self): exp_resource_id, data = self.create_fake_ref() callback = register_callback(UPDATED_OPERATION) self.update_resource(exp_resource_id, data) callback.assert_called_with('identity', EXP_RESOURCE_TYPE, UPDATED_OPERATION, {'resource_info': exp_resource_id}) @notifications.deleted(EXP_RESOURCE_TYPE) def delete_resource(self, resource_id): pass def test_resource_deleted_notification(self): exp_resource_id = uuid.uuid4().hex callback = register_callback(DELETED_OPERATION) self.delete_resource(exp_resource_id) callback.assert_called_with('identity', EXP_RESOURCE_TYPE, DELETED_OPERATION, {'resource_info': exp_resource_id}) @notifications.created(EXP_RESOURCE_TYPE) def create_exception(self, resource_id): raise ArbitraryException() def test_create_exception_without_notification(self): callback = register_callback(CREATED_OPERATION) self.assertRaises( ArbitraryException, self.create_exception, uuid.uuid4().hex) self.assertFalse(callback.called) @notifications.created(EXP_RESOURCE_TYPE) def update_exception(self, resource_id): raise ArbitraryException() def test_update_exception_without_notification(self): callback = register_callback(UPDATED_OPERATION) self.assertRaises( ArbitraryException, self.update_exception, uuid.uuid4().hex) self.assertFalse(callback.called) @notifications.deleted(EXP_RESOURCE_TYPE) def delete_exception(self, resource_id): raise ArbitraryException() def test_delete_exception_without_notification(self): callback = register_callback(DELETED_OPERATION) 
self.assertRaises( ArbitraryException, self.delete_exception, uuid.uuid4().hex) self.assertFalse(callback.called) class NotificationsTestCase(unit.BaseTestCase): def setUp(self): super(NotificationsTestCase, self).setUp() # these should use self.config_fixture.config(), but they haven't # been registered yet CONF.rpc_backend = 'fake' CONF.notification_driver = ['fake'] def test_send_notification(self): """Test the private method _send_notification to ensure event_type, payload, and context are built and passed properly. """ resource = uuid.uuid4().hex resource_type = EXP_RESOURCE_TYPE operation = CREATED_OPERATION # NOTE(ldbragst): Even though notifications._send_notification doesn't # contain logic that creates cases, this is supposed to test that # context is always empty and that we ensure the resource ID of the # resource in the notification is contained in the payload. It was # agreed that context should be empty in Keystone's case, which is # also noted in the /keystone/notifications.py module. This test # ensures and maintains these conditions. expected_args = [ {}, # empty context 'identity.%s.created' % resource_type, # event_type {'resource_info': resource}, # payload 'INFO', # priority is always INFO... 
] with mock.patch.object(notifications._get_notifier(), '_notify') as mocked: notifications._send_notification(operation, resource_type, resource) mocked.assert_called_once_with(*expected_args) class BaseNotificationTest(test_v3.RestfulTestCase): def setUp(self): super(BaseNotificationTest, self).setUp() self._notifications = [] self._audits = [] def fake_notify(operation, resource_type, resource_id, public=True): note = { 'resource_id': resource_id, 'operation': operation, 'resource_type': resource_type, 'send_notification_called': True, 'public': public} self._notifications.append(note) self.useFixture(mockpatch.PatchObject( notifications, '_send_notification', fake_notify)) def fake_audit(action, initiator, outcome, target, event_type, **kwargs): service_security = cadftaxonomy.SERVICE_SECURITY event = eventfactory.EventFactory().new_event( eventType=cadftype.EVENTTYPE_ACTIVITY, outcome=outcome, action=action, initiator=initiator, target=target, observer=cadfresource.Resource(typeURI=service_security)) for key, value in kwargs.items(): setattr(event, key, value) audit = { 'payload': event.as_dict(), 'event_type': event_type, 'send_notification_called': True} self._audits.append(audit) self.useFixture(mockpatch.PatchObject( notifications, '_send_audit_notification', fake_audit)) def _assert_last_note(self, resource_id, operation, resource_type): # NOTE(stevemar): If 'basic' format is not used, then simply # return since this assertion is not valid. if CONF.notification_format != 'basic': return self.assertTrue(len(self._notifications) > 0) note = self._notifications[-1] self.assertEqual(note['operation'], operation) self.assertEqual(note['resource_id'], resource_id) self.assertEqual(note['resource_type'], resource_type) self.assertTrue(note['send_notification_called']) def _assert_last_audit(self, resource_id, operation, resource_type, target_uri): # NOTE(stevemar): If 'cadf' format is not used, then simply # return since this assertion is not valid. 
if CONF.notification_format != 'cadf': return self.assertTrue(len(self._audits) > 0) audit = self._audits[-1] payload = audit['payload'] self.assertEqual(resource_id, payload['resource_info']) action = '%s.%s' % (operation, resource_type) self.assertEqual(action, payload['action']) self.assertEqual(target_uri, payload['target']['typeURI']) self.assertEqual(resource_id, payload['target']['id']) event_type = '%s.%s.%s' % ('identity', resource_type, operation) self.assertEqual(event_type, audit['event_type']) self.assertTrue(audit['send_notification_called']) def _assert_notify_not_sent(self, resource_id, operation, resource_type, public=True): unexpected = { 'resource_id': resource_id, 'operation': operation, 'resource_type': resource_type, 'send_notification_called': True, 'public': public} for note in self._notifications: self.assertNotEqual(unexpected, note) def _assert_notify_sent(self, resource_id, operation, resource_type, public=True): expected = { 'resource_id': resource_id, 'operation': operation, 'resource_type': resource_type, 'send_notification_called': True, 'public': public} for note in self._notifications: if expected == note: break else: self.fail("Notification not sent.") class NotificationsForEntities(BaseNotificationTest): def test_create_group(self): group_ref = self.new_group_ref(domain_id=self.domain_id) group_ref = self.identity_api.create_group(group_ref) self._assert_last_note(group_ref['id'], CREATED_OPERATION, 'group') self._assert_last_audit(group_ref['id'], CREATED_OPERATION, 'group', cadftaxonomy.SECURITY_GROUP) def test_create_project(self): project_ref = self.new_project_ref(domain_id=self.domain_id) self.assignment_api.create_project(project_ref['id'], project_ref) self._assert_last_note( project_ref['id'], CREATED_OPERATION, 'project') self._assert_last_audit(project_ref['id'], CREATED_OPERATION, 'project', cadftaxonomy.SECURITY_PROJECT) def test_create_role(self): role_ref = self.new_role_ref() 
self.role_api.create_role(role_ref['id'], role_ref) self._assert_last_note(role_ref['id'], CREATED_OPERATION, 'role') self._assert_last_audit(role_ref['id'], CREATED_OPERATION, 'role', cadftaxonomy.SECURITY_ROLE) def test_create_user(self): user_ref = self.new_user_ref(domain_id=self.domain_id) user_ref = self.identity_api.create_user(user_ref) self._assert_last_note(user_ref['id'], CREATED_OPERATION, 'user') self._assert_last_audit(user_ref['id'], CREATED_OPERATION, 'user', cadftaxonomy.SECURITY_ACCOUNT_USER) def test_create_trust(self): trustor = self.new_user_ref(domain_id=self.domain_id) trustor = self.identity_api.create_user(trustor) trustee = self.new_user_ref(domain_id=self.domain_id) trustee = self.identity_api.create_user(trustee) role_ref = self.new_role_ref() self.role_api.create_role(role_ref['id'], role_ref) trust_ref = self.new_trust_ref(trustor['id'], trustee['id']) self.trust_api.create_trust(trust_ref['id'], trust_ref, [role_ref]) self._assert_last_note( trust_ref['id'], CREATED_OPERATION, 'OS-TRUST:trust') self._assert_last_audit(trust_ref['id'], CREATED_OPERATION, 'OS-TRUST:trust', cadftaxonomy.SECURITY_TRUST) def test_delete_group(self): group_ref = self.new_group_ref(domain_id=self.domain_id) group_ref = self.identity_api.create_group(group_ref) self.identity_api.delete_group(group_ref['id']) self._assert_last_note(group_ref['id'], DELETED_OPERATION, 'group') self._assert_last_audit(group_ref['id'], DELETED_OPERATION, 'group', cadftaxonomy.SECURITY_GROUP) def test_delete_project(self): project_ref = self.new_project_ref(domain_id=self.domain_id) self.assignment_api.create_project(project_ref['id'], project_ref) self.assignment_api.delete_project(project_ref['id']) self._assert_last_note( project_ref['id'], DELETED_OPERATION, 'project') self._assert_last_audit(project_ref['id'], DELETED_OPERATION, 'project', cadftaxonomy.SECURITY_PROJECT) def test_delete_role(self): role_ref = self.new_role_ref() self.role_api.create_role(role_ref['id'], 
role_ref) self.role_api.delete_role(role_ref['id']) self._assert_last_note(role_ref['id'], DELETED_OPERATION, 'role') self._assert_last_audit(role_ref['id'], DELETED_OPERATION, 'role', cadftaxonomy.SECURITY_ROLE) def test_delete_user(self): user_ref = self.new_user_ref(domain_id=self.domain_id) user_ref = self.identity_api.create_user(user_ref) self.identity_api.delete_user(user_ref['id']) self._assert_last_note(user_ref['id'], DELETED_OPERATION, 'user') self._assert_last_audit(user_ref['id'], DELETED_OPERATION, 'user', cadftaxonomy.SECURITY_ACCOUNT_USER) def test_create_domain(self): domain_ref = self.new_domain_ref() self.resource_api.create_domain(domain_ref['id'], domain_ref) self._assert_last_note(domain_ref['id'], CREATED_OPERATION, 'domain') self._assert_last_audit(domain_ref['id'], CREATED_OPERATION, 'domain', cadftaxonomy.SECURITY_DOMAIN) def test_update_domain(self): domain_ref = self.new_domain_ref() self.assignment_api.create_domain(domain_ref['id'], domain_ref) domain_ref['description'] = uuid.uuid4().hex self.assignment_api.update_domain(domain_ref['id'], domain_ref) self._assert_last_note(domain_ref['id'], UPDATED_OPERATION, 'domain') self._assert_last_audit(domain_ref['id'], UPDATED_OPERATION, 'domain', cadftaxonomy.SECURITY_DOMAIN) def test_delete_domain(self): domain_ref = self.new_domain_ref() self.assignment_api.create_domain(domain_ref['id'], domain_ref) domain_ref['enabled'] = False self.assignment_api.update_domain(domain_ref['id'], domain_ref) self.assignment_api.delete_domain(domain_ref['id']) self._assert_last_note(domain_ref['id'], DELETED_OPERATION, 'domain') self._assert_last_audit(domain_ref['id'], DELETED_OPERATION, 'domain', cadftaxonomy.SECURITY_DOMAIN) def test_delete_trust(self): trustor = self.new_user_ref(domain_id=self.domain_id) trustor = self.identity_api.create_user(trustor) trustee = self.new_user_ref(domain_id=self.domain_id) trustee = self.identity_api.create_user(trustee) role_ref = self.new_role_ref() trust_ref = 
self.new_trust_ref(trustor['id'], trustee['id']) self.trust_api.create_trust(trust_ref['id'], trust_ref, [role_ref]) self.trust_api.delete_trust(trust_ref['id']) self._assert_last_note( trust_ref['id'], DELETED_OPERATION, 'OS-TRUST:trust') self._assert_last_audit(trust_ref['id'], DELETED_OPERATION, 'OS-TRUST:trust', cadftaxonomy.SECURITY_TRUST) def test_create_endpoint(self): endpoint_ref = self.new_endpoint_ref(service_id=self.service_id) self.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref) self._assert_notify_sent(endpoint_ref['id'], CREATED_OPERATION, 'endpoint') self._assert_last_audit(endpoint_ref['id'], CREATED_OPERATION, 'endpoint', cadftaxonomy.SECURITY_ENDPOINT) def test_update_endpoint(self): endpoint_ref = self.new_endpoint_ref(service_id=self.service_id) self.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref) self.catalog_api.update_endpoint(endpoint_ref['id'], endpoint_ref) self._assert_notify_sent(endpoint_ref['id'], UPDATED_OPERATION, 'endpoint') self._assert_last_audit(endpoint_ref['id'], UPDATED_OPERATION, 'endpoint', cadftaxonomy.SECURITY_ENDPOINT) def test_delete_endpoint(self): endpoint_ref = self.new_endpoint_ref(service_id=self.service_id) self.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref) self.catalog_api.delete_endpoint(endpoint_ref['id']) self._assert_notify_sent(endpoint_ref['id'], DELETED_OPERATION, 'endpoint') self._assert_last_audit(endpoint_ref['id'], DELETED_OPERATION, 'endpoint', cadftaxonomy.SECURITY_ENDPOINT) def test_create_service(self): service_ref = self.new_service_ref() self.catalog_api.create_service(service_ref['id'], service_ref) self._assert_notify_sent(service_ref['id'], CREATED_OPERATION, 'service') self._assert_last_audit(service_ref['id'], CREATED_OPERATION, 'service', cadftaxonomy.SECURITY_SERVICE) def test_update_service(self): service_ref = self.new_service_ref() self.catalog_api.create_service(service_ref['id'], service_ref) 
self.catalog_api.update_service(service_ref['id'], service_ref) self._assert_notify_sent(service_ref['id'], UPDATED_OPERATION, 'service') self._assert_last_audit(service_ref['id'], UPDATED_OPERATION, 'service', cadftaxonomy.SECURITY_SERVICE) def test_delete_service(self): service_ref = self.new_service_ref() self.catalog_api.create_service(service_ref['id'], service_ref) self.catalog_api.delete_service(service_ref['id']) self._assert_notify_sent(service_ref['id'], DELETED_OPERATION, 'service') self._assert_last_audit(service_ref['id'], DELETED_OPERATION, 'service', cadftaxonomy.SECURITY_SERVICE) def test_create_region(self): region_ref = self.new_region_ref() self.catalog_api.create_region(region_ref) self._assert_notify_sent(region_ref['id'], CREATED_OPERATION, 'region') self._assert_last_audit(region_ref['id'], CREATED_OPERATION, 'region', cadftaxonomy.SECURITY_REGION) def test_update_region(self): region_ref = self.new_region_ref() self.catalog_api.create_region(region_ref) self.catalog_api.update_region(region_ref['id'], region_ref) self._assert_notify_sent(region_ref['id'], UPDATED_OPERATION, 'region') self._assert_last_audit(region_ref['id'], UPDATED_OPERATION, 'region', cadftaxonomy.SECURITY_REGION) def test_delete_region(self): region_ref = self.new_region_ref() self.catalog_api.create_region(region_ref) self.catalog_api.delete_region(region_ref['id']) self._assert_notify_sent(region_ref['id'], DELETED_OPERATION, 'region') self._assert_last_audit(region_ref['id'], DELETED_OPERATION, 'region', cadftaxonomy.SECURITY_REGION) def test_create_policy(self): policy_ref = self.new_policy_ref() self.policy_api.create_policy(policy_ref['id'], policy_ref) self._assert_notify_sent(policy_ref['id'], CREATED_OPERATION, 'policy') self._assert_last_audit(policy_ref['id'], CREATED_OPERATION, 'policy', cadftaxonomy.SECURITY_POLICY) def test_update_policy(self): policy_ref = self.new_policy_ref() self.policy_api.create_policy(policy_ref['id'], policy_ref) 
self.policy_api.update_policy(policy_ref['id'], policy_ref) self._assert_notify_sent(policy_ref['id'], UPDATED_OPERATION, 'policy') self._assert_last_audit(policy_ref['id'], UPDATED_OPERATION, 'policy', cadftaxonomy.SECURITY_POLICY) def test_delete_policy(self): policy_ref = self.new_policy_ref() self.policy_api.create_policy(policy_ref['id'], policy_ref) self.policy_api.delete_policy(policy_ref['id']) self._assert_notify_sent(policy_ref['id'], DELETED_OPERATION, 'policy') self._assert_last_audit(policy_ref['id'], DELETED_OPERATION, 'policy', cadftaxonomy.SECURITY_POLICY) def test_disable_domain(self): domain_ref = self.new_domain_ref() self.assignment_api.create_domain(domain_ref['id'], domain_ref) domain_ref['enabled'] = False self.assignment_api.update_domain(domain_ref['id'], domain_ref) self._assert_notify_sent(domain_ref['id'], 'disabled', 'domain', public=False) def test_disable_of_disabled_domain_does_not_notify(self): domain_ref = self.new_domain_ref() domain_ref['enabled'] = False self.assignment_api.create_domain(domain_ref['id'], domain_ref) # The domain_ref above is not changed during the create process. We # can use the same ref to perform the update. 
self.assignment_api.update_domain(domain_ref['id'], domain_ref) self._assert_notify_not_sent(domain_ref['id'], 'disabled', 'domain', public=False) def test_update_group(self): group_ref = self.new_group_ref(domain_id=self.domain_id) group_ref = self.identity_api.create_group(group_ref) self.identity_api.update_group(group_ref['id'], group_ref) self._assert_last_note(group_ref['id'], UPDATED_OPERATION, 'group') self._assert_last_audit(group_ref['id'], UPDATED_OPERATION, 'group', cadftaxonomy.SECURITY_GROUP) def test_update_project(self): project_ref = self.new_project_ref(domain_id=self.domain_id) self.assignment_api.create_project(project_ref['id'], project_ref) self.assignment_api.update_project(project_ref['id'], project_ref) self._assert_notify_sent( project_ref['id'], UPDATED_OPERATION, 'project', public=True) self._assert_last_audit(project_ref['id'], UPDATED_OPERATION, 'project', cadftaxonomy.SECURITY_PROJECT) def test_disable_project(self): project_ref = self.new_project_ref(domain_id=self.domain_id) self.assignment_api.create_project(project_ref['id'], project_ref) project_ref['enabled'] = False self.assignment_api.update_project(project_ref['id'], project_ref) self._assert_notify_sent(project_ref['id'], 'disabled', 'project', public=False) def test_disable_of_disabled_project_does_not_notify(self): project_ref = self.new_project_ref(domain_id=self.domain_id) project_ref['enabled'] = False self.assignment_api.create_project(project_ref['id'], project_ref) # The project_ref above is not changed during the create process. We # can use the same ref to perform the update. 
self.assignment_api.update_project(project_ref['id'], project_ref) self._assert_notify_not_sent(project_ref['id'], 'disabled', 'project', public=False) def test_update_project_does_not_send_disable(self): project_ref = self.new_project_ref(domain_id=self.domain_id) self.assignment_api.create_project(project_ref['id'], project_ref) project_ref['enabled'] = True self.assignment_api.update_project(project_ref['id'], project_ref) self._assert_last_note( project_ref['id'], UPDATED_OPERATION, 'project') self._assert_notify_not_sent(project_ref['id'], 'disabled', 'project') def test_update_role(self): role_ref = self.new_role_ref() self.role_api.create_role(role_ref['id'], role_ref) self.role_api.update_role(role_ref['id'], role_ref) self._assert_last_note(role_ref['id'], UPDATED_OPERATION, 'role') self._assert_last_audit(role_ref['id'], UPDATED_OPERATION, 'role', cadftaxonomy.SECURITY_ROLE) def test_update_user(self): user_ref = self.new_user_ref(domain_id=self.domain_id) user_ref = self.identity_api.create_user(user_ref) self.identity_api.update_user(user_ref['id'], user_ref) self._assert_last_note(user_ref['id'], UPDATED_OPERATION, 'user') self._assert_last_audit(user_ref['id'], UPDATED_OPERATION, 'user', cadftaxonomy.SECURITY_ACCOUNT_USER) def test_config_option_no_events(self): self.config_fixture.config(notification_format='basic') role_ref = self.new_role_ref() self.role_api.create_role(role_ref['id'], role_ref) # The regular notifications will still be emitted, since they are # used for callback handling. 
self._assert_last_note(role_ref['id'], CREATED_OPERATION, 'role') # No audit event should have occurred self.assertEqual(0, len(self._audits)) class CADFNotificationsForEntities(NotificationsForEntities): def setUp(self): super(CADFNotificationsForEntities, self).setUp() self.config_fixture.config(notification_format='cadf') def test_initiator_data_is_set(self): ref = self.new_domain_ref() resp = self.post('/domains', body={'domain': ref}) resource_id = resp.result.get('domain').get('id') self._assert_last_audit(resource_id, CREATED_OPERATION, 'domain', cadftaxonomy.SECURITY_DOMAIN) self.assertTrue(len(self._audits) > 0) audit = self._audits[-1] payload = audit['payload'] self.assertEqual(self.user_id, payload['initiator']['id']) self.assertEqual(self.project_id, payload['initiator']['project_id']) class TestEventCallbacks(test_v3.RestfulTestCase): def setUp(self): super(TestEventCallbacks, self).setUp() self.has_been_called = False def _project_deleted_callback(self, service, resource_type, operation, payload): self.has_been_called = True def _project_created_callback(self, service, resource_type, operation, payload): self.has_been_called = True def test_notification_received(self): callback = register_callback(CREATED_OPERATION, 'project') project_ref = self.new_project_ref(domain_id=self.domain_id) self.assignment_api.create_project(project_ref['id'], project_ref) self.assertTrue(callback.called) def test_notification_method_not_callable(self): fake_method = None self.assertRaises(TypeError, notifications.register_event_callback, UPDATED_OPERATION, 'project', [fake_method]) def test_notification_event_not_valid(self): self.assertRaises(ValueError, notifications.register_event_callback, uuid.uuid4().hex, 'project', self._project_deleted_callback) def test_event_registration_for_unknown_resource_type(self): # Registration for unknown resource types should succeed. If no event # is issued for that resource type, the callback wont be triggered. 
notifications.register_event_callback(DELETED_OPERATION, uuid.uuid4().hex, self._project_deleted_callback) resource_type = uuid.uuid4().hex notifications.register_event_callback(DELETED_OPERATION, resource_type, self._project_deleted_callback) def test_provider_event_callbacks_subscription(self): callback_called = [] @dependency.provider('foo_api') class Foo(object): def __init__(self): self.event_callbacks = { CREATED_OPERATION: {'project': [self.foo_callback]}} def foo_callback(self, service, resource_type, operation, payload): # uses callback_called from the closure callback_called.append(True) Foo() project_ref = self.new_project_ref(domain_id=self.domain_id) self.assignment_api.create_project(project_ref['id'], project_ref) self.assertEqual([True], callback_called) def test_invalid_event_callbacks(self): @dependency.provider('foo_api') class Foo(object): def __init__(self): self.event_callbacks = 'bogus' self.assertRaises(ValueError, Foo) def test_invalid_event_callbacks_event(self): @dependency.provider('foo_api') class Foo(object): def __init__(self): self.event_callbacks = {CREATED_OPERATION: 'bogus'} self.assertRaises(ValueError, Foo) class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase): LOCAL_HOST = 'localhost' ACTION = 'authenticate' ROLE_ASSIGNMENT = 'role_assignment' def setUp(self): super(CadfNotificationsWrapperTestCase, self).setUp() self._notifications = [] def fake_notify(action, initiator, outcome, target, event_type, **kwargs): service_security = cadftaxonomy.SERVICE_SECURITY event = eventfactory.EventFactory().new_event( eventType=cadftype.EVENTTYPE_ACTIVITY, outcome=outcome, action=action, initiator=initiator, target=target, observer=cadfresource.Resource(typeURI=service_security)) for key, value in kwargs.items(): setattr(event, key, value) note = { 'action': action, 'initiator': initiator, 'event': event, 'event_type': event_type, 'send_notification_called': True} self._notifications.append(note) 
self.useFixture(mockpatch.PatchObject( notifications, '_send_audit_notification', fake_notify)) def _assert_last_note(self, action, user_id, event_type=None): self.assertTrue(self._notifications) note = self._notifications[-1] self.assertEqual(note['action'], action) initiator = note['initiator'] self.assertEqual(initiator.id, user_id) self.assertEqual(initiator.host.address, self.LOCAL_HOST) self.assertTrue(note['send_notification_called']) if event_type: self.assertEqual(note['event_type'], event_type) def _assert_event(self, role_id, project=None, domain=None, user=None, group=None, inherit=False): """Assert that the CADF event is valid. In the case of role assignments, the event will have extra data, specifically, the role, target, actor, and if the role is inherited. An example event, as a dictionary is seen below: { 'typeURI': 'http://schemas.dmtf.org/cloud/audit/1.0/event', 'initiator': { 'typeURI': 'service/security/account/user', 'host': {'address': 'localhost'}, 'id': 'openstack:0a90d95d-582c-4efb-9cbc-e2ca7ca9c341', 'name': u'bccc2d9bfc2a46fd9e33bcf82f0b5c21' }, 'target': { 'typeURI': 'service/security/account/user', 'id': 'openstack:d48ea485-ef70-4f65-8d2b-01aa9d7ec12d' }, 'observer': { 'typeURI': 'service/security', 'id': 'openstack:d51dd870-d929-4aba-8d75-dcd7555a0c95' }, 'eventType': 'activity', 'eventTime': '2014-08-21T21:04:56.204536+0000', 'role': u'0e6b990380154a2599ce6b6e91548a68', 'domain': u'24bdcff1aab8474895dbaac509793de1', 'inherited_to_projects': False, 'group': u'c1e22dc67cbd469ea0e33bf428fe597a', 'action': 'created.role_assignment', 'outcome': 'success', 'id': 'openstack:782689dd-f428-4f13-99c7-5c70f94a5ac1' } """ note = self._notifications[-1] event = note['event'] if project: self.assertEqual(project, event.project) if domain: self.assertEqual(domain, event.domain) if user: self.assertEqual(user, event.user) if group: self.assertEqual(group, event.group) self.assertEqual(role_id, event.role) self.assertEqual(inherit, 
event.inherited_to_projects) def test_v3_authenticate_user_name_and_domain_id(self): user_id = self.user_id user_name = self.user['name'] password = self.user['password'] domain_id = self.domain_id data = self.build_authentication_request(username=user_name, user_domain_id=domain_id, password=password) self.post('/auth/tokens', body=data) self._assert_last_note(self.ACTION, user_id) def test_v3_authenticate_user_id(self): user_id = self.user_id password = self.user['password'] data = self.build_authentication_request(user_id=user_id, password=password) self.post('/auth/tokens', body=data) self._assert_last_note(self.ACTION, user_id) def test_v3_authenticate_user_name_and_domain_name(self): user_id = self.user_id user_name = self.user['name'] password = self.user['password'] domain_name = self.domain['name'] data = self.build_authentication_request(username=user_name, user_domain_name=domain_name, password=password) self.post('/auth/tokens', body=data) self._assert_last_note(self.ACTION, user_id) def _test_role_assignment(self, url, role, project=None, domain=None, user=None, group=None): self.put(url) action = "%s.%s" % (CREATED_OPERATION, self.ROLE_ASSIGNMENT) event_type = '%s.%s.%s' % (notifications.SERVICE, self.ROLE_ASSIGNMENT, CREATED_OPERATION) self._assert_last_note(action, self.user_id, event_type) self._assert_event(role, project, domain, user, group) self.delete(url) action = "%s.%s" % (DELETED_OPERATION, self.ROLE_ASSIGNMENT) event_type = '%s.%s.%s' % (notifications.SERVICE, self.ROLE_ASSIGNMENT, DELETED_OPERATION) self._assert_last_note(action, self.user_id, event_type) self._assert_event(role, project, domain, user, group) def test_user_project_grant(self): url = ('/projects/%s/users/%s/roles/%s' % (self.project_id, self.user_id, self.role_id)) self._test_role_assignment(url, self.role_id, project=self.project_id, user=self.user_id) def test_group_domain_grant(self): group_ref = self.new_group_ref(domain_id=self.domain_id) group = 
self.identity_api.create_group(group_ref) url = ('/domains/%s/groups/%s/roles/%s' % (self.domain_id, group['id'], self.role_id)) self._test_role_assignment(url, self.role_id, domain=self.domain_id, group=group['id']) def test_add_role_to_user_and_project(self): # A notification is sent when add_role_to_user_and_project is called on # the assignment manager. project_ref = self.new_project_ref(self.domain_id) project = self.resource_api.create_project( project_ref['id'], project_ref) tenant_id = project['id'] self.assignment_api.add_role_to_user_and_project( self.user_id, tenant_id, self.role_id) self.assertTrue(self._notifications) note = self._notifications[-1] self.assertEqual(note['action'], 'created.role_assignment') self.assertTrue(note['send_notification_called']) self._assert_event(self.role_id, project=tenant_id, user=self.user_id) def test_remove_role_from_user_and_project(self): # A notification is sent when remove_role_from_user_and_project is # called on the assignment manager. self.assignment_api.remove_role_from_user_and_project( self.user_id, self.project_id, self.role_id) self.assertTrue(self._notifications) note = self._notifications[-1] self.assertEqual(note['action'], 'deleted.role_assignment') self.assertTrue(note['send_notification_called']) self._assert_event(self.role_id, project=self.project_id, user=self.user_id) class TestCallbackRegistration(unit.BaseTestCase): def setUp(self): super(TestCallbackRegistration, self).setUp() self.mock_log = mock.Mock() # Force the callback logging to occur self.mock_log.logger.getEffectiveLevel.return_value = logging.DEBUG def verify_log_message(self, data): """Tests that use this are a little brittle because adding more logging can break them. 
TODO(dstanek): remove the need for this in a future refactoring """ log_fn = self.mock_log.debug self.assertEqual(len(data), log_fn.call_count) for datum in data: log_fn.assert_any_call(mock.ANY, datum) def test_a_function_callback(self): def callback(*args, **kwargs): pass resource_type = 'thing' with mock.patch('keystone.notifications.LOG', self.mock_log): notifications.register_event_callback( CREATED_OPERATION, resource_type, callback) callback = 'keystone.tests.unit.common.test_notifications.callback' expected_log_data = { 'callback': callback, 'event': 'identity.%s.created' % resource_type } self.verify_log_message([expected_log_data]) def test_a_method_callback(self): class C(object): def callback(self, *args, **kwargs): pass with mock.patch('keystone.notifications.LOG', self.mock_log): notifications.register_event_callback( CREATED_OPERATION, 'thing', C.callback) callback = 'keystone.tests.unit.common.test_notifications.C.callback' expected_log_data = { 'callback': callback, 'event': 'identity.thing.created' } self.verify_log_message([expected_log_data]) def test_a_list_of_callbacks(self): def callback(*args, **kwargs): pass class C(object): def callback(self, *args, **kwargs): pass with mock.patch('keystone.notifications.LOG', self.mock_log): notifications.register_event_callback( CREATED_OPERATION, 'thing', [callback, C.callback]) callback_1 = 'keystone.tests.unit.common.test_notifications.callback' callback_2 = 'keystone.tests.unit.common.test_notifications.C.callback' expected_log_data = [ { 'callback': callback_1, 'event': 'identity.thing.created' }, { 'callback': callback_2, 'event': 'identity.thing.created' }, ] self.verify_log_message(expected_log_data) def test_an_invalid_callback(self): self.assertRaises(TypeError, notifications.register_event_callback, (CREATED_OPERATION, 'thing', object())) def test_an_invalid_event(self): def callback(*args, **kwargs): pass self.assertRaises(ValueError, notifications.register_event_callback, uuid.uuid4().hex, 
'thing', callback)
apache-2.0
beezee/GAE-Django-base-app
django/middleware/cache.py
241
9078
""" Cache middleware. If enabled, each Django-powered page will be cached based on URL. The canonical way to enable cache middleware is to set ``UpdateCacheMiddleware`` as your first piece of middleware, and ``FetchFromCacheMiddleware`` as the last:: MIDDLEWARE_CLASSES = [ 'django.middleware.cache.UpdateCacheMiddleware', ... 'django.middleware.cache.FetchFromCacheMiddleware' ] This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run last during the response phase, which processes middleware bottom-up; ``FetchFromCacheMiddleware`` needs to run last during the request phase, which processes middleware top-down. The single-class ``CacheMiddleware`` can be used for some simple sites. However, if any other piece of middleware needs to affect the cache key, you'll need to use the two-part ``UpdateCacheMiddleware`` and ``FetchFromCacheMiddleware``. This'll most often happen when you're using Django's ``LocaleMiddleware``. More details about how the caching works: * Only GET or HEAD-requests with status code 200 are cached. * The number of seconds each page is stored for is set by the "max-age" section of the response's "Cache-Control" header, falling back to the CACHE_MIDDLEWARE_SECONDS setting if the section was not found. * If CACHE_MIDDLEWARE_ANONYMOUS_ONLY is set to True, only anonymous requests (i.e., those not made by a logged-in user) will be cached. This is a simple and effective way of avoiding the caching of the Django admin (and any other user-specific content). * This middleware expects that a HEAD request is answered with the same response headers exactly like the corresponding GET request. * When a hit occurs, a shallow copy of the original response object is returned from process_request. * Pages will be cached based on the contents of the request headers listed in the response's "Vary" header. * This middleware also sets ETag, Last-Modified, Expires and Cache-Control headers on the response object. 
""" from django.conf import settings from django.core.cache import get_cache, DEFAULT_CACHE_ALIAS from django.utils.cache import get_cache_key, learn_cache_key, patch_response_headers, get_max_age class UpdateCacheMiddleware(object): """ Response-phase cache middleware that updates the cache if the response is cacheable. Must be used as part of the two-part update/fetch cache middleware. UpdateCacheMiddleware must be the first piece of middleware in MIDDLEWARE_CLASSES so that it'll get called last during the response phase. """ def __init__(self): self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False) self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS self.cache = get_cache(self.cache_alias) def _session_accessed(self, request): try: return request.session.accessed except AttributeError: return False def _should_update_cache(self, request, response): if not hasattr(request, '_cache_update_cache') or not request._cache_update_cache: return False # If the session has not been accessed otherwise, we don't want to # cause it to be accessed here. If it hasn't been accessed, then the # user's logged-in status has not affected the response anyway. if self.cache_anonymous_only and self._session_accessed(request): assert hasattr(request, 'user'), "The Django cache middleware with CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True requires authentication middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.auth.middleware.AuthenticationMiddleware' before the CacheMiddleware." if request.user.is_authenticated(): # Don't cache user-variable requests from authenticated users. return False return True def process_response(self, request, response): """Sets the cache, if needed.""" if not self._should_update_cache(request, response): # We don't need to update the cache, just return. 
return response if not response.status_code == 200: return response # Try to get the timeout from the "max-age" section of the "Cache- # Control" header before reverting to using the default cache_timeout # length. timeout = get_max_age(response) if timeout == None: timeout = self.cache_timeout elif timeout == 0: # max-age was set to 0, don't bother caching. return response patch_response_headers(response, timeout) if timeout: cache_key = learn_cache_key(request, response, timeout, self.key_prefix, cache=self.cache) if hasattr(response, 'render') and callable(response.render): response.add_post_render_callback( lambda r: self.cache.set(cache_key, r, timeout) ) else: self.cache.set(cache_key, response, timeout) return response class FetchFromCacheMiddleware(object): """ Request-phase cache middleware that fetches a page from the cache. Must be used as part of the two-part update/fetch cache middleware. FetchFromCacheMiddleware must be the last piece of middleware in MIDDLEWARE_CLASSES so that it'll get called last during the request phase. """ def __init__(self): self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False) self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS self.cache = get_cache(self.cache_alias) def process_request(self, request): """ Checks whether the page is already cached and returns the cached version if available. """ if not request.method in ('GET', 'HEAD'): request._cache_update_cache = False return None # Don't bother checking the cache. # try and get the cached GET response cache_key = get_cache_key(request, self.key_prefix, 'GET', cache=self.cache) if cache_key is None: request._cache_update_cache = True return None # No cache information available, need to rebuild. 
response = self.cache.get(cache_key, None) # if it wasn't found and we are looking for a HEAD, try looking just for that if response is None and request.method == 'HEAD': cache_key = get_cache_key(request, self.key_prefix, 'HEAD', cache=self.cache) response = self.cache.get(cache_key, None) if response is None: request._cache_update_cache = True return None # No cache information available, need to rebuild. # hit, return cached response request._cache_update_cache = False return response class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware): """ Cache middleware that provides basic behavior for many simple sites. Also used as the hook point for the cache decorator, which is generated using the decorator-from-middleware utility. """ def __init__(self, cache_timeout=None, cache_anonymous_only=None, **kwargs): # We need to differentiate between "provided, but using default value", # and "not provided". If the value is provided using a default, then # we fall back to system defaults. If it is not provided at all, # we need to use middleware defaults. 
cache_kwargs = {} try: self.key_prefix = kwargs['key_prefix'] if self.key_prefix is not None: cache_kwargs['KEY_PREFIX'] = self.key_prefix else: self.key_prefix = '' except KeyError: self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX cache_kwargs['KEY_PREFIX'] = self.key_prefix try: self.cache_alias = kwargs['cache_alias'] if self.cache_alias is None: self.cache_alias = DEFAULT_CACHE_ALIAS if cache_timeout is not None: cache_kwargs['TIMEOUT'] = cache_timeout except KeyError: self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS if cache_timeout is None: cache_kwargs['TIMEOUT'] = settings.CACHE_MIDDLEWARE_SECONDS else: cache_kwargs['TIMEOUT'] = cache_timeout if cache_anonymous_only is None: self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False) else: self.cache_anonymous_only = cache_anonymous_only self.cache = get_cache(self.cache_alias, **cache_kwargs) self.cache_timeout = self.cache.default_timeout
bsd-3-clause
thaim/ansible
test/units/modules/network/ftd/test_ftd_file_upload.py
25
3368
from __future__ import absolute_import import pytest from ansible.module_utils import basic from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleFailJson, AnsibleExitJson from ansible.modules.network.ftd import ftd_file_upload from ansible.module_utils.network.ftd.fdm_swagger_client import OperationField from ansible.module_utils.network.ftd.common import HTTPMethod class TestFtdFileUpload(object): module = ftd_file_upload @pytest.fixture(autouse=True) def module_mock(self, mocker): return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) @pytest.fixture def connection_mock(self, mocker): connection_class_mock = mocker.patch('ansible.modules.network.ftd.ftd_file_upload.Connection') return connection_class_mock.return_value @pytest.mark.parametrize("missing_arg", ['operation', 'file_to_upload']) def test_module_should_fail_without_required_args(self, missing_arg): module_args = {'operation': 'uploadFile', 'file_to_upload': '/tmp/test.txt'} del module_args[missing_arg] set_module_args(module_args) with pytest.raises(AnsibleFailJson) as ex: self.module.main() assert 'missing required arguments: %s' % missing_arg in str(ex.value) def test_module_should_fail_when_no_operation_spec_found(self, connection_mock): connection_mock.get_operation_spec.return_value = None set_module_args({'operation': 'nonExistingUploadOperation', 'file_to_upload': '/tmp/test.txt'}) with pytest.raises(AnsibleFailJson) as ex: self.module.main() result = ex.value.args[0] assert result['failed'] assert result['msg'] == 'Operation with specified name is not found: nonExistingUploadOperation' def test_module_should_fail_when_not_upload_operation_specified(self, connection_mock): connection_mock.get_operation_spec.return_value = { OperationField.METHOD: HTTPMethod.GET, OperationField.URL: '/object/network', OperationField.MODEL_NAME: 'NetworkObject' } set_module_args({'operation': 'nonUploadOperation', 'file_to_upload': '/tmp/test.txt'}) 
with pytest.raises(AnsibleFailJson) as ex: self.module.main() result = ex.value.args[0] assert result['failed'] assert result['msg'] == 'Invalid upload operation: nonUploadOperation. ' \ 'The operation must make POST request and return UploadStatus model.' def test_module_should_call_upload_and_return_response(self, connection_mock): connection_mock.get_operation_spec.return_value = { OperationField.METHOD: HTTPMethod.POST, OperationField.URL: '/uploadFile', OperationField.MODEL_NAME: 'FileUploadStatus' } connection_mock.upload_file.return_value = {'id': '123'} set_module_args({ 'operation': 'uploadFile', 'file_to_upload': '/tmp/test.txt' }) with pytest.raises(AnsibleExitJson) as ex: self.module.main() result = ex.value.args[0] assert result['changed'] assert {'id': '123'} == result['response'] connection_mock.upload_file.assert_called_once_with('/tmp/test.txt', '/uploadFile')
mit
sjsucohort6/openstack
python/venv/lib/python2.7/site-packages/keystoneauth1/loading/base.py
3
3953
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six import stevedore from keystoneauth1 import exceptions PLUGIN_NAMESPACE = 'keystoneauth1.plugin' __all__ = ['get_available_plugin_names', 'get_available_plugin_loaders', 'get_plugin_loader', 'get_plugin_options', 'BaseLoader', 'PLUGIN_NAMESPACE'] def get_available_plugin_names(): """Get the names of all the plugins that are available on the system. This is particularly useful for help and error text to prompt a user for example what plugins they may specify. :returns: A list of names. :rtype: frozenset """ mgr = stevedore.ExtensionManager(namespace=PLUGIN_NAMESPACE) return frozenset(mgr.names()) def get_available_plugin_loaders(): """Retrieve all the plugin classes available on the system. :returns: A dict with plugin entrypoint name as the key and the plugin loader as the value. :rtype: dict """ mgr = stevedore.ExtensionManager(namespace=PLUGIN_NAMESPACE, invoke_on_load=True, propagate_map_exceptions=True) return dict(mgr.map(lambda ext: (ext.entry_point.name, ext.obj))) def get_plugin_loader(name): """Retrieve a plugin class by its entrypoint name. :param str name: The name of the object to get. :returns: An auth plugin class. :rtype: :py:class:`keystoneauth1.loading.BaseLoader` :raises keystonauth.exceptions.NoMatchingPlugin: if a plugin cannot be created. 
""" try: mgr = stevedore.DriverManager(namespace=PLUGIN_NAMESPACE, invoke_on_load=True, name=name) except RuntimeError: raise exceptions.NoMatchingPlugin(name) return mgr.driver def get_plugin_options(name): """Get the options for a specific plugin. This will be the list of options that is registered and loaded by the specified plugin. :returns: A list of :py:class:`keystoneauth1.loading.Opt` options. :raises keystonauth.exceptions.NoMatchingPlugin: if a plugin cannot be created. """ return get_plugin_loader(name).get_options() @six.add_metaclass(abc.ABCMeta) class BaseLoader(object): @abc.abstractproperty def plugin_class(self): raise NotImplemented() @abc.abstractmethod def get_options(self): """Return the list of parameters associated with the auth plugin. This list may be used to generate CLI or config arguments. :returns: A list of Param objects describing available plugin parameters. :rtype: list """ return [] def load_from_options(self, **kwargs): """Create a plugin from the arguments retrieved from get_options. A client can override this function to do argument validation or to handle differences between the registered options and what is required to create the plugin. """ missing_required = [o for o in self.get_options() if o.required and kwargs.get(o.dest) is None] if missing_required: raise exceptions.MissingRequiredOptions(missing_required) return self.plugin_class(**kwargs)
mit
mcking49/apache-flask
Python/Lib/test/test_doctest.py
12
86542
# -*- coding: utf-8 -*- """ Test script for doctest. """ import sys from test import test_support import doctest # NOTE: There are some additional tests relating to interaction with # zipimport in the test_zipimport_support test module. ###################################################################### ## Sample Objects (used by test cases) ###################################################################### def sample_func(v): """ Blah blah >>> print sample_func(22) 44 Yee ha! """ return v+v class SampleClass: """ >>> print 1 1 >>> # comments get ignored. so are empty PS1 and PS2 prompts: >>> ... Multiline example: >>> sc = SampleClass(3) >>> for i in range(10): ... sc = sc.double() ... print sc.get(), 6 12 24 48 96 192 384 768 1536 3072 """ def __init__(self, val): """ >>> print SampleClass(12).get() 12 """ self.val = val def double(self): """ >>> print SampleClass(12).double().get() 24 """ return SampleClass(self.val + self.val) def get(self): """ >>> print SampleClass(-5).get() -5 """ return self.val def a_staticmethod(v): """ >>> print SampleClass.a_staticmethod(10) 11 """ return v+1 a_staticmethod = staticmethod(a_staticmethod) def a_classmethod(cls, v): """ >>> print SampleClass.a_classmethod(10) 12 >>> print SampleClass(0).a_classmethod(10) 12 """ return v+2 a_classmethod = classmethod(a_classmethod) a_property = property(get, doc=""" >>> print SampleClass(22).a_property 22 """) class NestedClass: """ >>> x = SampleClass.NestedClass(5) >>> y = x.square() >>> print y.get() 25 """ def __init__(self, val=0): """ >>> print SampleClass.NestedClass().get() 0 """ self.val = val def square(self): return SampleClass.NestedClass(self.val*self.val) def get(self): return self.val class SampleNewStyleClass(object): r""" >>> print '1\n2\n3' 1 2 3 """ def __init__(self, val): """ >>> print SampleNewStyleClass(12).get() 12 """ self.val = val def double(self): """ >>> print SampleNewStyleClass(12).double().get() 24 """ return SampleNewStyleClass(self.val + self.val) 
def get(self): """ >>> print SampleNewStyleClass(-5).get() -5 """ return self.val ###################################################################### ## Fake stdin (for testing interactive debugging) ###################################################################### class _FakeInput: """ A fake input stream for pdb's interactive debugger. Whenever a line is read, print it (to simulate the user typing it), and then return it. The set of lines to return is specified in the constructor; they should not have trailing newlines. """ def __init__(self, lines): self.lines = lines def readline(self): line = self.lines.pop(0) print line return line+'\n' ###################################################################### ## Test Cases ###################################################################### def test_Example(): r""" Unit tests for the `Example` class. Example is a simple container class that holds: - `source`: A source string. - `want`: An expected output string. - `exc_msg`: An expected exception message string (or None if no exception is expected). - `lineno`: A line number (within the docstring). - `indent`: The example's indentation in the input string. - `options`: An option dictionary, mapping option flags to True or False. These attributes are set by the constructor. `source` and `want` are required; the other attributes all have default values: >>> example = doctest.Example('print 1', '1\n') >>> (example.source, example.want, example.exc_msg, ... example.lineno, example.indent, example.options) ('print 1\n', '1\n', None, 0, 0, {}) The first three attributes (`source`, `want`, and `exc_msg`) may be specified positionally; the remaining arguments should be specified as keyword arguments: >>> exc_msg = 'IndexError: pop from an empty list' >>> example = doctest.Example('[].pop()', '', exc_msg, ... lineno=5, indent=4, ... options={doctest.ELLIPSIS: True}) >>> (example.source, example.want, example.exc_msg, ... 
example.lineno, example.indent, example.options) ('[].pop()\n', '', 'IndexError: pop from an empty list\n', 5, 4, {8: True}) The constructor normalizes the `source` string to end in a newline: Source spans a single line: no terminating newline. >>> e = doctest.Example('print 1', '1\n') >>> e.source, e.want ('print 1\n', '1\n') >>> e = doctest.Example('print 1\n', '1\n') >>> e.source, e.want ('print 1\n', '1\n') Source spans multiple lines: require terminating newline. >>> e = doctest.Example('print 1;\nprint 2\n', '1\n2\n') >>> e.source, e.want ('print 1;\nprint 2\n', '1\n2\n') >>> e = doctest.Example('print 1;\nprint 2', '1\n2\n') >>> e.source, e.want ('print 1;\nprint 2\n', '1\n2\n') Empty source string (which should never appear in real examples) >>> e = doctest.Example('', '') >>> e.source, e.want ('\n', '') The constructor normalizes the `want` string to end in a newline, unless it's the empty string: >>> e = doctest.Example('print 1', '1\n') >>> e.source, e.want ('print 1\n', '1\n') >>> e = doctest.Example('print 1', '1') >>> e.source, e.want ('print 1\n', '1\n') >>> e = doctest.Example('print', '') >>> e.source, e.want ('print\n', '') The constructor normalizes the `exc_msg` string to end in a newline, unless it's `None`: Message spans one line >>> exc_msg = 'IndexError: pop from an empty list' >>> e = doctest.Example('[].pop()', '', exc_msg) >>> e.exc_msg 'IndexError: pop from an empty list\n' >>> exc_msg = 'IndexError: pop from an empty list\n' >>> e = doctest.Example('[].pop()', '', exc_msg) >>> e.exc_msg 'IndexError: pop from an empty list\n' Message spans multiple lines >>> exc_msg = 'ValueError: 1\n 2' >>> e = doctest.Example('raise ValueError("1\n 2")', '', exc_msg) >>> e.exc_msg 'ValueError: 1\n 2\n' >>> exc_msg = 'ValueError: 1\n 2\n' >>> e = doctest.Example('raise ValueError("1\n 2")', '', exc_msg) >>> e.exc_msg 'ValueError: 1\n 2\n' Empty (but non-None) exception message (which should never appear in real examples) >>> exc_msg = '' >>> e = 
doctest.Example('raise X()', '', exc_msg) >>> e.exc_msg '\n' Compare `Example`: >>> example = doctest.Example('print 1', '1\n') >>> same_example = doctest.Example('print 1', '1\n') >>> other_example = doctest.Example('print 42', '42\n') >>> example == same_example True >>> example != same_example False >>> hash(example) == hash(same_example) True >>> example == other_example False >>> example != other_example True """ def test_DocTest(): r""" Unit tests for the `DocTest` class. DocTest is a collection of examples, extracted from a docstring, along with information about where the docstring comes from (a name, filename, and line number). The docstring is parsed by the `DocTest` constructor: >>> docstring = ''' ... >>> print 12 ... 12 ... ... Non-example text. ... ... >>> print 'another\example' ... another ... example ... ''' >>> globs = {} # globals to run the test in. >>> parser = doctest.DocTestParser() >>> test = parser.get_doctest(docstring, globs, 'some_test', ... 'some_file', 20) >>> print test <DocTest some_test from some_file:20 (2 examples)> >>> len(test.examples) 2 >>> e1, e2 = test.examples >>> (e1.source, e1.want, e1.lineno) ('print 12\n', '12\n', 1) >>> (e2.source, e2.want, e2.lineno) ("print 'another\\example'\n", 'another\nexample\n', 6) Source information (name, filename, and line number) is available as attributes on the doctest object: >>> (test.name, test.filename, test.lineno) ('some_test', 'some_file', 20) The line number of an example within its containing file is found by adding the line number of the example and the line number of its containing test: >>> test.lineno + e1.lineno 21 >>> test.lineno + e2.lineno 26 If the docstring contains inconsistant leading whitespace in the expected output of an example, then `DocTest` will raise a ValueError: >>> docstring = r''' ... >>> print 'bad\nindentation' ... bad ... indentation ... 
''' >>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0) Traceback (most recent call last): ValueError: line 4 of the docstring for some_test has inconsistent leading whitespace: 'indentation' If the docstring contains inconsistent leading whitespace on continuation lines, then `DocTest` will raise a ValueError: >>> docstring = r''' ... >>> print ('bad indentation', ... ... 2) ... ('bad', 'indentation') ... ''' >>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0) Traceback (most recent call last): ValueError: line 2 of the docstring for some_test has inconsistent leading whitespace: '... 2)' If there's no blank space after a PS1 prompt ('>>>'), then `DocTest` will raise a ValueError: >>> docstring = '>>>print 1\n1' >>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0) Traceback (most recent call last): ValueError: line 1 of the docstring for some_test lacks blank after >>>: '>>>print 1' If there's no blank space after a PS2 prompt ('...'), then `DocTest` will raise a ValueError: >>> docstring = '>>> if 1:\n...print 1\n1' >>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0) Traceback (most recent call last): ValueError: line 2 of the docstring for some_test lacks blank after ...: '...print 1' Compare `DocTest`: >>> docstring = ''' ... >>> print 12 ... 12 ... ''' >>> test = parser.get_doctest(docstring, globs, 'some_test', ... 'some_test', 20) >>> same_test = parser.get_doctest(docstring, globs, 'some_test', ... 'some_test', 20) >>> test == same_test True >>> test != same_test False >>> hash(test) == hash(same_test) True >>> docstring = ''' ... >>> print 42 ... 42 ... ''' >>> other_test = parser.get_doctest(docstring, globs, 'other_test', ... 
'other_file', 10) >>> test == other_test False >>> test != other_test True Compare `DocTestCase`: >>> DocTestCase = doctest.DocTestCase >>> test_case = DocTestCase(test) >>> same_test_case = DocTestCase(same_test) >>> other_test_case = DocTestCase(other_test) >>> test_case == same_test_case True >>> test_case != same_test_case False >>> hash(test_case) == hash(same_test_case) True >>> test == other_test_case False >>> test != other_test_case True """ def test_DocTestFinder(): r""" Unit tests for the `DocTestFinder` class. DocTestFinder is used to extract DocTests from an object's docstring and the docstrings of its contained objects. It can be used with modules, functions, classes, methods, staticmethods, classmethods, and properties. Finding Tests in Functions ~~~~~~~~~~~~~~~~~~~~~~~~~~ For a function whose docstring contains examples, DocTestFinder.find() will return a single test (for that function's docstring): >>> finder = doctest.DocTestFinder() We'll simulate a __file__ attr that ends in pyc: >>> import test.test_doctest >>> old = test.test_doctest.__file__ >>> test.test_doctest.__file__ = 'test_doctest.pyc' >>> tests = finder.find(sample_func) >>> print tests # doctest: +ELLIPSIS [<DocTest sample_func from ...:17 (1 example)>] The exact name depends on how test_doctest was invoked, so allow for leading path components. >>> tests[0].filename # doctest: +ELLIPSIS '...test_doctest.py' >>> test.test_doctest.__file__ = old >>> e = tests[0].examples[0] >>> (e.source, e.want, e.lineno) ('print sample_func(22)\n', '44\n', 3) By default, tests are created for objects with no docstring: >>> def no_docstring(v): ... pass >>> finder.find(no_docstring) [] However, the optional argument `exclude_empty` to the DocTestFinder constructor can be used to exclude tests for objects with empty docstrings: >>> def no_docstring(v): ... 
pass >>> excl_empty_finder = doctest.DocTestFinder(exclude_empty=True) >>> excl_empty_finder.find(no_docstring) [] If the function has a docstring with no examples, then a test with no examples is returned. (This lets `DocTestRunner` collect statistics about which functions have no tests -- but is that useful? And should an empty test also be created when there's no docstring?) >>> def no_examples(v): ... ''' no doctest examples ''' >>> finder.find(no_examples) # doctest: +ELLIPSIS [<DocTest no_examples from ...:1 (no examples)>] Finding Tests in Classes ~~~~~~~~~~~~~~~~~~~~~~~~ For a class, DocTestFinder will create a test for the class's docstring, and will recursively explore its contents, including methods, classmethods, staticmethods, properties, and nested classes. >>> finder = doctest.DocTestFinder() >>> tests = finder.find(SampleClass) >>> for t in tests: ... print '%2s %s' % (len(t.examples), t.name) 3 SampleClass 3 SampleClass.NestedClass 1 SampleClass.NestedClass.__init__ 1 SampleClass.__init__ 2 SampleClass.a_classmethod 1 SampleClass.a_property 1 SampleClass.a_staticmethod 1 SampleClass.double 1 SampleClass.get New-style classes are also supported: >>> tests = finder.find(SampleNewStyleClass) >>> for t in tests: ... print '%2s %s' % (len(t.examples), t.name) 1 SampleNewStyleClass 1 SampleNewStyleClass.__init__ 1 SampleNewStyleClass.double 1 SampleNewStyleClass.get Finding Tests in Modules ~~~~~~~~~~~~~~~~~~~~~~~~ For a module, DocTestFinder will create a test for the class's docstring, and will recursively explore its contents, including functions, classes, and the `__test__` dictionary, if it exists: >>> # A module >>> import types >>> m = types.ModuleType('some_module') >>> def triple(val): ... ''' ... >>> print triple(11) ... 33 ... ''' ... return val*3 >>> m.__dict__.update({ ... 'sample_func': sample_func, ... 'SampleClass': SampleClass, ... '__doc__': ''' ... Module docstring. ... >>> print 'module' ... module ... ''', ... '__test__': { ... 
'd': '>>> print 6\n6\n>>> print 7\n7\n', ... 'c': triple}}) >>> finder = doctest.DocTestFinder() >>> # Use module=test.test_doctest, to prevent doctest from >>> # ignoring the objects since they weren't defined in m. >>> import test.test_doctest >>> tests = finder.find(m, module=test.test_doctest) >>> for t in tests: ... print '%2s %s' % (len(t.examples), t.name) 1 some_module 3 some_module.SampleClass 3 some_module.SampleClass.NestedClass 1 some_module.SampleClass.NestedClass.__init__ 1 some_module.SampleClass.__init__ 2 some_module.SampleClass.a_classmethod 1 some_module.SampleClass.a_property 1 some_module.SampleClass.a_staticmethod 1 some_module.SampleClass.double 1 some_module.SampleClass.get 1 some_module.__test__.c 2 some_module.__test__.d 1 some_module.sample_func Duplicate Removal ~~~~~~~~~~~~~~~~~ If a single object is listed twice (under different names), then tests will only be generated for it once: >>> from test import doctest_aliases >>> assert doctest_aliases.TwoNames.f >>> assert doctest_aliases.TwoNames.g >>> tests = excl_empty_finder.find(doctest_aliases) >>> print len(tests) 2 >>> print tests[0].name test.doctest_aliases.TwoNames TwoNames.f and TwoNames.g are bound to the same object. We can't guess which will be found in doctest's traversal of TwoNames.__dict__ first, so we have to allow for either. >>> tests[1].name.split('.')[-1] in ['f', 'g'] True Empty Tests ~~~~~~~~~~~ By default, an object with no doctests doesn't create any tests: >>> tests = doctest.DocTestFinder().find(SampleClass) >>> for t in tests: ... print '%2s %s' % (len(t.examples), t.name) 3 SampleClass 3 SampleClass.NestedClass 1 SampleClass.NestedClass.__init__ 1 SampleClass.__init__ 2 SampleClass.a_classmethod 1 SampleClass.a_property 1 SampleClass.a_staticmethod 1 SampleClass.double 1 SampleClass.get By default, that excluded objects with no doctests. exclude_empty=False tells it to include (empty) tests for objects with no doctests. 
This feature is really to support backward compatibility in what doctest.master.summarize() displays. >>> tests = doctest.DocTestFinder(exclude_empty=False).find(SampleClass) >>> for t in tests: ... print '%2s %s' % (len(t.examples), t.name) 3 SampleClass 3 SampleClass.NestedClass 1 SampleClass.NestedClass.__init__ 0 SampleClass.NestedClass.get 0 SampleClass.NestedClass.square 1 SampleClass.__init__ 2 SampleClass.a_classmethod 1 SampleClass.a_property 1 SampleClass.a_staticmethod 1 SampleClass.double 1 SampleClass.get Turning off Recursion ~~~~~~~~~~~~~~~~~~~~~ DocTestFinder can be told not to look for tests in contained objects using the `recurse` flag: >>> tests = doctest.DocTestFinder(recurse=False).find(SampleClass) >>> for t in tests: ... print '%2s %s' % (len(t.examples), t.name) 3 SampleClass Line numbers ~~~~~~~~~~~~ DocTestFinder finds the line number of each example: >>> def f(x): ... ''' ... >>> x = 12 ... ... some text ... ... >>> # examples are not created for comments & bare prompts. ... >>> ... ... ... ... >>> for x in range(10): ... ... print x, ... 0 1 2 3 4 5 6 7 8 9 ... >>> x//2 ... 6 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> [e.lineno for e in test.examples] [1, 9, 12] """ def test_DocTestParser(): r""" Unit tests for the `DocTestParser` class. DocTestParser is used to parse docstrings containing doctest examples. The `parse` method divides a docstring into examples and intervening text: >>> s = ''' ... >>> x, y = 2, 3 # no output expected ... >>> if 1: ... ... print x ... ... print y ... 2 ... 3 ... ... Some text. ... >>> x+y ... 5 ... ''' >>> parser = doctest.DocTestParser() >>> for piece in parser.parse(s): ... if isinstance(piece, doctest.Example): ... print 'Example:', (piece.source, piece.want, piece.lineno) ... else: ... 
print ' Text:', `piece` Text: '\n' Example: ('x, y = 2, 3 # no output expected\n', '', 1) Text: '' Example: ('if 1:\n print x\n print y\n', '2\n3\n', 2) Text: '\nSome text.\n' Example: ('x+y\n', '5\n', 9) Text: '' The `get_examples` method returns just the examples: >>> for piece in parser.get_examples(s): ... print (piece.source, piece.want, piece.lineno) ('x, y = 2, 3 # no output expected\n', '', 1) ('if 1:\n print x\n print y\n', '2\n3\n', 2) ('x+y\n', '5\n', 9) The `get_doctest` method creates a Test from the examples, along with the given arguments: >>> test = parser.get_doctest(s, {}, 'name', 'filename', lineno=5) >>> (test.name, test.filename, test.lineno) ('name', 'filename', 5) >>> for piece in test.examples: ... print (piece.source, piece.want, piece.lineno) ('x, y = 2, 3 # no output expected\n', '', 1) ('if 1:\n print x\n print y\n', '2\n3\n', 2) ('x+y\n', '5\n', 9) """ class test_DocTestRunner: def basics(): r""" Unit tests for the `DocTestRunner` class. DocTestRunner is used to run DocTest test cases, and to accumulate statistics. Here's a simple DocTest case we can use: >>> def f(x): ... ''' ... >>> x = 12 ... >>> print x ... 12 ... >>> x//2 ... 6 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] The main DocTestRunner interface is the `run` method, which runs a given DocTest case in a given namespace (globs). It returns a tuple `(f,t)`, where `f` is the number of failed tests and `t` is the number of tried tests. >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=3) If any example produces incorrect output, then the test runner reports the failure and proceeds to the next example: >>> def f(x): ... ''' ... >>> x = 12 ... >>> print x ... 14 ... >>> x//2 ... 6 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=True).run(test) ... 
# doctest: +ELLIPSIS Trying: x = 12 Expecting nothing ok Trying: print x Expecting: 14 ********************************************************************** File ..., line 4, in f Failed example: print x Expected: 14 Got: 12 Trying: x//2 Expecting: 6 ok TestResults(failed=1, attempted=3) """ def verbose_flag(): r""" The `verbose` flag makes the test runner generate more detailed output: >>> def f(x): ... ''' ... >>> x = 12 ... >>> print x ... 12 ... >>> x//2 ... 6 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=True).run(test) Trying: x = 12 Expecting nothing ok Trying: print x Expecting: 12 ok Trying: x//2 Expecting: 6 ok TestResults(failed=0, attempted=3) If the `verbose` flag is unspecified, then the output will be verbose iff `-v` appears in sys.argv: >>> # Save the real sys.argv list. >>> old_argv = sys.argv >>> # If -v does not appear in sys.argv, then output isn't verbose. >>> sys.argv = ['test'] >>> doctest.DocTestRunner().run(test) TestResults(failed=0, attempted=3) >>> # If -v does appear in sys.argv, then output is verbose. >>> sys.argv = ['test', '-v'] >>> doctest.DocTestRunner().run(test) Trying: x = 12 Expecting nothing ok Trying: print x Expecting: 12 ok Trying: x//2 Expecting: 6 ok TestResults(failed=0, attempted=3) >>> # Restore sys.argv >>> sys.argv = old_argv In the remaining examples, the test runner's verbosity will be explicitly set, to ensure that the test behavior is consistent. """ def exceptions(): r""" Tests of `DocTestRunner`'s exception handling. An expected exception is specified with a traceback message. The lines between the first line and the type/value may be omitted or replaced with any other string: >>> def f(x): ... ''' ... >>> x = 12 ... >>> print x//0 ... Traceback (most recent call last): ... ZeroDivisionError: integer division or modulo by zero ... 
''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=2) An example may not generate output before it raises an exception; if it does, then the traceback message will not be recognized as signaling an expected exception, so the example will be reported as an unexpected exception: >>> def f(x): ... ''' ... >>> x = 12 ... >>> print 'pre-exception output', x//0 ... pre-exception output ... Traceback (most recent call last): ... ZeroDivisionError: integer division or modulo by zero ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 4, in f Failed example: print 'pre-exception output', x//0 Exception raised: ... ZeroDivisionError: integer division or modulo by zero TestResults(failed=1, attempted=2) Exception messages may contain newlines: >>> def f(x): ... r''' ... >>> raise ValueError, 'multi\nline\nmessage' ... Traceback (most recent call last): ... ValueError: multi ... line ... message ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=1) If an exception is expected, but an exception with the wrong type or message is raised, then it is reported as a failure: >>> def f(x): ... r''' ... >>> raise ValueError, 'message' ... Traceback (most recent call last): ... ValueError: wrong message ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 3, in f Failed example: raise ValueError, 'message' Expected: Traceback (most recent call last): ValueError: wrong message Got: Traceback (most recent call last): ... 
ValueError: message TestResults(failed=1, attempted=1) However, IGNORE_EXCEPTION_DETAIL can be used to allow a mismatch in the detail: >>> def f(x): ... r''' ... >>> raise ValueError, 'message' #doctest: +IGNORE_EXCEPTION_DETAIL ... Traceback (most recent call last): ... ValueError: wrong message ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=1) IGNORE_EXCEPTION_DETAIL also ignores difference in exception formatting between Python versions. For example, in Python 3.x, the module path of the exception is in the output, but this will fail under Python 2: >>> def f(x): ... r''' ... >>> from httplib import HTTPException ... >>> raise HTTPException('message') ... Traceback (most recent call last): ... httplib.HTTPException: message ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 4, in f Failed example: raise HTTPException('message') Expected: Traceback (most recent call last): httplib.HTTPException: message Got: Traceback (most recent call last): ... HTTPException: message TestResults(failed=1, attempted=2) But in Python 2 the module path is not included, an therefore a test must look like the following test to succeed in Python 2. But that test will fail under Python 3. >>> def f(x): ... r''' ... >>> from httplib import HTTPException ... >>> raise HTTPException('message') ... Traceback (most recent call last): ... HTTPException: message ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=2) However, with IGNORE_EXCEPTION_DETAIL, the module name of the exception (if any) will be ignored: >>> def f(x): ... r''' ... >>> from httplib import HTTPException ... >>> raise HTTPException('message') #doctest: +IGNORE_EXCEPTION_DETAIL ... 
Traceback (most recent call last): ... HTTPException: message ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=2) The module path will be completely ignored, so two different module paths will still pass if IGNORE_EXCEPTION_DETAIL is given. This is intentional, so it can be used when exceptions have changed module. >>> def f(x): ... r''' ... >>> from httplib import HTTPException ... >>> raise HTTPException('message') #doctest: +IGNORE_EXCEPTION_DETAIL ... Traceback (most recent call last): ... foo.bar.HTTPException: message ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=2) But IGNORE_EXCEPTION_DETAIL does not allow a mismatch in the exception type: >>> def f(x): ... r''' ... >>> raise ValueError, 'message' #doctest: +IGNORE_EXCEPTION_DETAIL ... Traceback (most recent call last): ... TypeError: wrong type ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 3, in f Failed example: raise ValueError, 'message' #doctest: +IGNORE_EXCEPTION_DETAIL Expected: Traceback (most recent call last): TypeError: wrong type Got: Traceback (most recent call last): ... ValueError: message TestResults(failed=1, attempted=1) If the exception does not have a message, you can still use IGNORE_EXCEPTION_DETAIL to normalize the modules between Python 2 and 3: >>> def f(x): ... r''' ... >>> from Queue import Empty ... >>> raise Empty() #doctest: +IGNORE_EXCEPTION_DETAIL ... Traceback (most recent call last): ... foo.bar.Empty ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=2) Note that a trailing colon doesn't matter either: >>> def f(x): ... r''' ... 
>>> from Queue import Empty ... >>> raise Empty() #doctest: +IGNORE_EXCEPTION_DETAIL ... Traceback (most recent call last): ... foo.bar.Empty: ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=2) If an exception is raised but not expected, then it is reported as an unexpected exception: >>> def f(x): ... r''' ... >>> 1//0 ... 0 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 3, in f Failed example: 1//0 Exception raised: Traceback (most recent call last): ... ZeroDivisionError: integer division or modulo by zero TestResults(failed=1, attempted=1) """ def displayhook(): r""" Test that changing sys.displayhook doesn't matter for doctest. >>> import sys >>> orig_displayhook = sys.displayhook >>> def my_displayhook(x): ... print('hi!') >>> sys.displayhook = my_displayhook >>> def f(): ... ''' ... >>> 3 ... 3 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> r = doctest.DocTestRunner(verbose=False).run(test) >>> post_displayhook = sys.displayhook We need to restore sys.displayhook now, so that we'll be able to test results. >>> sys.displayhook = orig_displayhook Ok, now we can check that everything is ok. >>> r TestResults(failed=0, attempted=1) >>> post_displayhook is my_displayhook True """ def optionflags(): r""" Tests of `DocTestRunner`'s option flag handling. Several option flags can be used to customize the behavior of the test runner. These are defined as module constants in doctest, and passed to the DocTestRunner constructor (multiple constants should be ORed together). The DONT_ACCEPT_TRUE_FOR_1 flag disables matches between True/False and 1/0: >>> def f(x): ... 
'>>> True\n1\n' >>> # Without the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=1) >>> # With the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.DONT_ACCEPT_TRUE_FOR_1 >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 2, in f Failed example: True Expected: 1 Got: True TestResults(failed=1, attempted=1) The DONT_ACCEPT_BLANKLINE flag disables the match between blank lines and the '<BLANKLINE>' marker: >>> def f(x): ... '>>> print "a\\n\\nb"\na\n<BLANKLINE>\nb\n' >>> # Without the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=1) >>> # With the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.DONT_ACCEPT_BLANKLINE >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 2, in f Failed example: print "a\n\nb" Expected: a <BLANKLINE> b Got: a <BLANKLINE> b TestResults(failed=1, attempted=1) The NORMALIZE_WHITESPACE flag causes all sequences of whitespace to be treated as equal: >>> def f(x): ... '>>> print 1, 2, 3\n 1 2\n 3' >>> # Without the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... 
# doctest: +ELLIPSIS ********************************************************************** File ..., line 2, in f Failed example: print 1, 2, 3 Expected: 1 2 3 Got: 1 2 3 TestResults(failed=1, attempted=1) >>> # With the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.NORMALIZE_WHITESPACE >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) TestResults(failed=0, attempted=1) An example from the docs: >>> print range(20) #doctest: +NORMALIZE_WHITESPACE [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] The ELLIPSIS flag causes ellipsis marker ("...") in the expected output to match any substring in the actual output: >>> def f(x): ... '>>> print range(15)\n[0, 1, 2, ..., 14]\n' >>> # Without the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 2, in f Failed example: print range(15) Expected: [0, 1, 2, ..., 14] Got: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] TestResults(failed=1, attempted=1) >>> # With the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.ELLIPSIS >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) TestResults(failed=0, attempted=1) ... also matches nothing: >>> for i in range(100): ... print i**2, #doctest: +ELLIPSIS 0 1...4...9 16 ... 36 49 64 ... 9801 ... can be surprising; e.g., this test passes: >>> for i in range(21): #doctest: +ELLIPSIS ... print i, 0 1 2 ...1...2...0 Examples from the docs: >>> print range(20) # doctest:+ELLIPSIS [0, 1, ..., 18, 19] >>> print range(20) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE [0, 1, ..., 18, 19] The SKIP flag causes an example to be skipped entirely. I.e., the example is not run. 
It can be useful in contexts where doctest examples serve as both documentation and test cases, and an example should be included for documentation purposes, but should not be checked (e.g., because its output is random, or depends on resources which would be unavailable.) The SKIP flag can also be used for 'commenting out' broken examples. >>> import unavailable_resource # doctest: +SKIP >>> unavailable_resource.do_something() # doctest: +SKIP >>> unavailable_resource.blow_up() # doctest: +SKIP Traceback (most recent call last): ... UncheckedBlowUpError: Nobody checks me. >>> import random >>> print random.random() # doctest: +SKIP 0.721216923889 The REPORT_UDIFF flag causes failures that involve multi-line expected and actual outputs to be displayed using a unified diff: >>> def f(x): ... r''' ... >>> print '\n'.join('abcdefg') ... a ... B ... c ... d ... f ... g ... h ... ''' >>> # Without the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 3, in f Failed example: print '\n'.join('abcdefg') Expected: a B c d f g h Got: a b c d e f g TestResults(failed=1, attempted=1) >>> # With the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.REPORT_UDIFF >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 3, in f Failed example: print '\n'.join('abcdefg') Differences (unified diff with -expected +actual): @@ -1,7 +1,7 @@ a -B +b c d +e f g -h TestResults(failed=1, attempted=1) The REPORT_CDIFF flag causes failures that involve multi-line expected and actual outputs to be displayed using a context diff: >>> # Reuse f() from the REPORT_UDIFF example, above. 
>>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.REPORT_CDIFF >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 3, in f Failed example: print '\n'.join('abcdefg') Differences (context diff with expected followed by actual): *************** *** 1,7 **** a ! B c d f g - h --- 1,7 ---- a ! b c d + e f g TestResults(failed=1, attempted=1) The REPORT_NDIFF flag causes failures to use the difflib.Differ algorithm used by the popular ndiff.py utility. This does intraline difference marking, as well as interline differences. >>> def f(x): ... r''' ... >>> print "a b c d e f g h i j k l m" ... a b c d e f g h i j k 1 m ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.REPORT_NDIFF >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 3, in f Failed example: print "a b c d e f g h i j k l m" Differences (ndiff with -expected +actual): - a b c d e f g h i j k 1 m ? ^ + a b c d e f g h i j k l m ? + ++ ^ TestResults(failed=1, attempted=1) The REPORT_ONLY_FIRST_FAILURE suppresses result output after the first failing example: >>> def f(x): ... r''' ... >>> print 1 # first success ... 1 ... >>> print 2 # first failure ... 200 ... >>> print 3 # second failure ... 300 ... >>> print 4 # second success ... 4 ... >>> print 5 # third failure ... 500 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.REPORT_ONLY_FIRST_FAILURE >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... 
# doctest: +ELLIPSIS ********************************************************************** File ..., line 5, in f Failed example: print 2 # first failure Expected: 200 Got: 2 TestResults(failed=3, attempted=5) However, output from `report_start` is not suppressed: >>> doctest.DocTestRunner(verbose=True, optionflags=flags).run(test) ... # doctest: +ELLIPSIS Trying: print 1 # first success Expecting: 1 ok Trying: print 2 # first failure Expecting: 200 ********************************************************************** File ..., line 5, in f Failed example: print 2 # first failure Expected: 200 Got: 2 TestResults(failed=3, attempted=5) For the purposes of REPORT_ONLY_FIRST_FAILURE, unexpected exceptions count as failures: >>> def f(x): ... r''' ... >>> print 1 # first success ... 1 ... >>> raise ValueError(2) # first failure ... 200 ... >>> print 3 # second failure ... 300 ... >>> print 4 # second success ... 4 ... >>> print 5 # third failure ... 500 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.REPORT_ONLY_FIRST_FAILURE >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 5, in f Failed example: raise ValueError(2) # first failure Exception raised: ... ValueError: 2 TestResults(failed=3, attempted=5) New option flags can also be registered, via register_optionflag(). Here we reach into doctest's internals a bit. >>> unlikely = "UNLIKELY_OPTION_NAME" >>> unlikely in doctest.OPTIONFLAGS_BY_NAME False >>> new_flag_value = doctest.register_optionflag(unlikely) >>> unlikely in doctest.OPTIONFLAGS_BY_NAME True Before 2.4.4/2.5, registering a name more than once erroneously created more than one flag value. Here we verify that's fixed: >>> redundant_flag_value = doctest.register_optionflag(unlikely) >>> redundant_flag_value == new_flag_value True Clean up. 
    >>> del doctest.OPTIONFLAGS_BY_NAME[unlikely]

"""

# Doctest-driven test: the docstring below IS the test body; it is collected
# and executed by test_main() via test_support.run_doctest().  Do not edit
# the examples or their expected output.
def option_directives(): r"""
Tests of `DocTestRunner`'s option directive mechanism.

Option directives can be used to turn option flags on or off for a
single example.  To turn an option on for an example, follow that
example with a comment of the form ``# doctest: +OPTION``:

    >>> def f(x): r'''
    ...     >>> print range(10)       # should fail: no ellipsis
    ...     [0, 1, ..., 9]
    ...
    ...     >>> print range(10)       # doctest: +ELLIPSIS
    ...     [0, 1, ..., 9]
    ...     '''
    >>> test = doctest.DocTestFinder().find(f)[0]
    >>> doctest.DocTestRunner(verbose=False).run(test)
    ... # doctest: +ELLIPSIS
    **********************************************************************
    File ..., line 2, in f
    Failed example:
        print range(10)       # should fail: no ellipsis
    Expected:
        [0, 1, ..., 9]
    Got:
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    TestResults(failed=1, attempted=2)

To turn an option off for an example, follow that example with a
comment of the form ``# doctest: -OPTION``:

    >>> def f(x): r'''
    ...     >>> print range(10)
    ...     [0, 1, ..., 9]
    ...
    ...     >>> # should fail: no ellipsis
    ...     >>> print range(10)       # doctest: -ELLIPSIS
    ...     [0, 1, ..., 9]
    ...     '''
    >>> test = doctest.DocTestFinder().find(f)[0]
    >>> doctest.DocTestRunner(verbose=False,
    ...                       optionflags=doctest.ELLIPSIS).run(test)
    ... # doctest: +ELLIPSIS
    **********************************************************************
    File ..., line 6, in f
    Failed example:
        print range(10)       # doctest: -ELLIPSIS
    Expected:
        [0, 1, ..., 9]
    Got:
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    TestResults(failed=1, attempted=2)

Option directives affect only the example that they appear with; they
do not change the options for surrounding examples:

    >>> def f(x): r'''
    ...     >>> print range(10)       # Should fail: no ellipsis
    ...     [0, 1, ..., 9]
    ...
    ...     >>> print range(10)       # doctest: +ELLIPSIS
    ...     [0, 1, ..., 9]
    ...
    ...     >>> print range(10)       # Should fail: no ellipsis
    ...     [0, 1, ..., 9]
    ...     '''
    >>> test = doctest.DocTestFinder().find(f)[0]
    >>> doctest.DocTestRunner(verbose=False).run(test)
    ... # doctest: +ELLIPSIS
    **********************************************************************
    File ..., line 2, in f
    Failed example:
        print range(10)       # Should fail: no ellipsis
    Expected:
        [0, 1, ..., 9]
    Got:
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    **********************************************************************
    File ..., line 8, in f
    Failed example:
        print range(10)       # Should fail: no ellipsis
    Expected:
        [0, 1, ..., 9]
    Got:
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    TestResults(failed=2, attempted=3)

Multiple options may be modified by a single option directive.  They
may be separated by whitespace, commas, or both:

    >>> def f(x): r'''
    ...     >>> print range(10)       # Should fail
    ...     [0, 1, ..., 9]
    ...     >>> print range(10)       # Should succeed
    ...     ... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    ...     [0, 1, ..., 9]
    ...     '''
    >>> test = doctest.DocTestFinder().find(f)[0]
    >>> doctest.DocTestRunner(verbose=False).run(test)
    ... # doctest: +ELLIPSIS
    **********************************************************************
    File ..., line 2, in f
    Failed example:
        print range(10)       # Should fail
    Expected:
        [0, 1, ..., 9]
    Got:
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    TestResults(failed=1, attempted=2)

    >>> def f(x): r'''
    ...     >>> print range(10)       # Should fail
    ...     [0, 1, ..., 9]
    ...     >>> print range(10)       # Should succeed
    ...     ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
    ...     [0, 1, ..., 9]
    ...     '''
    >>> test = doctest.DocTestFinder().find(f)[0]
    >>> doctest.DocTestRunner(verbose=False).run(test)
    ... # doctest: +ELLIPSIS
    **********************************************************************
    File ..., line 2, in f
    Failed example:
        print range(10)       # Should fail
    Expected:
        [0, 1, ..., 9]
    Got:
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    TestResults(failed=1, attempted=2)

    >>> def f(x): r'''
    ...     >>> print range(10)       # Should fail
    ...     [0, 1, ..., 9]
    ...     >>> print range(10)       # Should succeed
    ...     ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ...     [0, 1, ..., 9]
    ...     '''
    >>> test = doctest.DocTestFinder().find(f)[0]
    >>> doctest.DocTestRunner(verbose=False).run(test)
    ... # doctest: +ELLIPSIS
    **********************************************************************
    File ..., line 2, in f
    Failed example:
        print range(10)    # Should fail
    Expected:
        [0, 1, ..., 9]
    Got:
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    TestResults(failed=1, attempted=2)

The option directive may be put on the line following the source, as
long as a continuation prompt is used:

    >>> def f(x): r'''
    ...     >>> print range(10)
    ...     ... # doctest: +ELLIPSIS
    ...     [0, 1, ..., 9]
    ...     '''
    >>> test = doctest.DocTestFinder().find(f)[0]
    >>> doctest.DocTestRunner(verbose=False).run(test)
    TestResults(failed=0, attempted=1)

For examples with multi-line source, the option directive may appear
at the end of any line:

    >>> def f(x): r'''
    ...     >>> for x in range(10): # doctest: +ELLIPSIS
    ...     ...     print x,
    ...     0 1 2 ... 9
    ...
    ...     >>> for x in range(10):
    ...     ...     print x, # doctest: +ELLIPSIS
    ...     0 1 2 ... 9
    ...     '''
    >>> test = doctest.DocTestFinder().find(f)[0]
    >>> doctest.DocTestRunner(verbose=False).run(test)
    TestResults(failed=0, attempted=2)

If more than one line of an example with multi-line source has an
option directive, then they are combined:

    >>> def f(x): r'''
    ...     Should fail (option directive not on the last line):
    ...     >>> for x in range(10): # doctest: +ELLIPSIS
    ...     ...     print x, # doctest: +NORMALIZE_WHITESPACE
    ...     0  1    2...9
    ...     '''
    >>> test = doctest.DocTestFinder().find(f)[0]
    >>> doctest.DocTestRunner(verbose=False).run(test)
    TestResults(failed=0, attempted=1)

It is an error to have a comment of the form ``# doctest:`` that is
*not* followed by words of the form ``+OPTION`` or ``-OPTION``, where
``OPTION`` is an option that has been registered with
`register_option`:

    >>> # Error: Option not registered
    >>> s = '>>> print 12   #doctest: +BADOPTION'
    >>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0)
    Traceback (most recent call last):
    ValueError: line 1 of the doctest for s has an invalid option: '+BADOPTION'

    >>> # Error: No + or - prefix
    >>> s = '>>> print 12   #doctest: ELLIPSIS'
    >>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0)
    Traceback (most recent call last):
    ValueError: line 1 of the doctest for s has an invalid option: 'ELLIPSIS'

It is an error to use an option directive on a line that contains no
source:

    >>> s = '>>> # doctest: +ELLIPSIS'
    >>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0)
    Traceback (most recent call last):
    ValueError: line 0 of the doctest for s has an option directive on a line with no example: '# doctest: +ELLIPSIS'
"""

# Doctest-driven test (docstring is the test).  NOTE(review): the `self`
# parameter appears unused — doctest only collects the docstring, and
# nothing in this file calls the function directly.
def test_unicode_output(self): r"""

Check that unicode output works:

    >>> u'\xe9'
    u'\xe9'

If we return unicode, SpoofOut's buf variable becomes automagically
converted to unicode. This means all subsequent output becomes converted
to unicode, and if the output contains non-ascii characters that failed.
It used to be that this state change carried on between tests, meaning
tests would fail if unicode has been output previously in the testrun.
This test tests that this is no longer so:

    >>> print u'abc'
    abc

And then return a string with non-ascii characters:

    >>> print u'\xe9'.encode('utf-8')
    é

"""

# Doctest-driven test (docstring is the test).
def test_testsource(): r"""
Unit tests for `testsource()`.

The testsource() function takes a module and a name, finds the (first)
test with that name in that module, and converts it to a script.
The example code is converted to regular Python code.  The surrounding
words and expected output are converted to comments:

    >>> import test.test_doctest
    >>> name = 'test.test_doctest.sample_func'
    >>> print doctest.testsource(test.test_doctest, name)
    # Blah blah
    #
    print sample_func(22)
    # Expected:
    ## 44
    #
    # Yee ha!
    <BLANKLINE>

    >>> name = 'test.test_doctest.SampleNewStyleClass'
    >>> print doctest.testsource(test.test_doctest, name)
    print '1\n2\n3'
    # Expected:
    ## 1
    ## 2
    ## 3
    <BLANKLINE>

    >>> name = 'test.test_doctest.SampleClass.a_classmethod'
    >>> print doctest.testsource(test.test_doctest, name)
    print SampleClass.a_classmethod(10)
    # Expected:
    ## 12
    print SampleClass(0).a_classmethod(10)
    # Expected:
    ## 12
    <BLANKLINE>
"""

# Doctest-driven test (docstring is the test): drives pdb over a doctest
# via _FakeInput-provided stdin.
def test_debug(): r"""

Create a docstring that we want to debug:

    >>> s = '''
    ...     >>> x = 12
    ...     >>> print x
    ...     12
    ...     '''

Create some fake stdin input, to feed to the debugger:

    >>> import tempfile
    >>> real_stdin = sys.stdin
    >>> sys.stdin = _FakeInput(['next', 'print x', 'continue'])

Run the debugger on the docstring, and then restore sys.stdin.

    >>> try: doctest.debug_src(s)
    ... finally: sys.stdin = real_stdin
    > <string>(1)<module>()
    (Pdb) next
    12
    --Return--
    > <string>(1)<module>()->None
    (Pdb) print x
    12
    (Pdb) continue

"""

# Doctest-driven test (docstring is the test).
def test_pdb_set_trace():
    """Using pdb.set_trace from a doctest.

    You can use pdb.set_trace from a doctest.  To do so, you must
    retrieve the set_trace function from the pdb module at the time
    you use it.  The doctest module changes sys.stdout so that it can
    capture program output.  It also temporarily replaces pdb.set_trace
    with a version that restores stdout.  This is necessary for you to
    see debugger output.

      >>> doc = '''
      ... >>> x = 42
      ... >>> raise Exception('clé')
      ... Traceback (most recent call last):
      ...   ...
      ... Exception: clé
      ... >>> import pdb; pdb.set_trace()
      ... '''
      >>> parser = doctest.DocTestParser()
      >>> test = parser.get_doctest(doc, {}, "foo-bär@baz", "foo-bär@baz.py", 0)
      >>> runner = doctest.DocTestRunner(verbose=False)

    To demonstrate this, we'll create a fake standard input that
    captures our debugger input:

      >>> import tempfile
      >>> real_stdin = sys.stdin
      >>> sys.stdin = _FakeInput([
      ...    'print x',  # print data defined by the example
      ...    'continue', # stop debugging
      ...    ''])

      >>> try: runner.run(test)
      ... finally: sys.stdin = real_stdin
      --Return--
      > <doctest foo-bär@baz[2]>(1)<module>()->None
      -> import pdb; pdb.set_trace()
      (Pdb) print x
      42
      (Pdb) continue
      TestResults(failed=0, attempted=3)

    You can also put pdb.set_trace in a function called from a test:

      >>> def calls_set_trace():
      ...    y=2
      ...    import pdb; pdb.set_trace()

      >>> doc = '''
      ... >>> x=1
      ... >>> calls_set_trace()
      ... '''
      >>> test = parser.get_doctest(doc, globals(), "foo-bär@baz", "foo-bär@baz.py", 0)
      >>> real_stdin = sys.stdin
      >>> sys.stdin = _FakeInput([
      ...    'print y',  # print data defined in the function
      ...    'up',       # out of function
      ...    'print x',  # print data defined by the example
      ...    'continue', # stop debugging
      ...    ''])

      >>> try:
      ...     runner.run(test)
      ... finally:
      ...     sys.stdin = real_stdin
      --Return--
      > <doctest test.test_doctest.test_pdb_set_trace[8]>(3)calls_set_trace()->None
      -> import pdb; pdb.set_trace()
      (Pdb) print y
      2
      (Pdb) up
      > <doctest foo-bär@baz[1]>(1)<module>()
      -> calls_set_trace()
      (Pdb) print x
      1
      (Pdb) continue
      TestResults(failed=0, attempted=2)

    During interactive debugging, source code is shown, even for
    doctest examples:

      >>> doc = '''
      ... >>> def f(x):
      ... ...     g(x*2)
      ... >>> def g(x):
      ... ...     print x+3
      ... ...     import pdb; pdb.set_trace()
      ... >>> f(3)
      ... '''
      >>> test = parser.get_doctest(doc, globals(), "foo-bär@baz", "foo-bär@baz.py", 0)
      >>> real_stdin = sys.stdin
      >>> sys.stdin = _FakeInput([
      ...    'list',     # list source from example 2
      ...    'next',     # return from g()
      ...    'list',     # list source from example 1
      ...    'next',     # return from f()
      ...    'list',     # list source from example 3
      ...    'continue', # stop debugging
      ...    ''])

      >>> try: runner.run(test)
      ... finally: sys.stdin = real_stdin
      ... # doctest: +NORMALIZE_WHITESPACE
      --Return--
      > <doctest foo-bär@baz[1]>(3)g()->None
      -> import pdb; pdb.set_trace()
      (Pdb) list
        1     def g(x):
        2         print x+3
        3  ->     import pdb; pdb.set_trace()
      [EOF]
      (Pdb) next
      --Return--
      > <doctest foo-bär@baz[0]>(2)f()->None
      -> g(x*2)
      (Pdb) list
        1     def f(x):
        2  ->     g(x*2)
      [EOF]
      (Pdb) next
      --Return--
      > <doctest foo-bär@baz[2]>(1)<module>()->None
      -> f(3)
      (Pdb) list
        1  -> f(3)
      [EOF]
      (Pdb) continue
      **********************************************************************
      File "foo-bär@baz.py", line 7, in foo-bär@baz
      Failed example:
          f(3)
      Expected nothing
      Got:
          9
      TestResults(failed=1, attempted=3)
    """

# Doctest-driven test (docstring is the test).
def test_pdb_set_trace_nested():
    """This illustrates more-demanding use of set_trace with nested functions.

    >>> class C(object):
    ...     def calls_set_trace(self):
    ...         y = 1
    ...         import pdb; pdb.set_trace()
    ...         self.f1()
    ...         y = 2
    ...     def f1(self):
    ...         x = 1
    ...         self.f2()
    ...         x = 2
    ...     def f2(self):
    ...         z = 1
    ...         z = 2

    >>> calls_set_trace = C().calls_set_trace

    >>> doc = '''
    ... >>> a = 1
    ... >>> calls_set_trace()
    ... '''
    >>> parser = doctest.DocTestParser()
    >>> runner = doctest.DocTestRunner(verbose=False)
    >>> test = parser.get_doctest(doc, globals(), "foo-bär@baz", "foo-bär@baz.py", 0)
    >>> real_stdin = sys.stdin
    >>> sys.stdin = _FakeInput([
    ...    'print y',  # print data defined in the function
    ...    'step', 'step', 'step', 'step', 'step', 'step', 'print z',
    ...    'up', 'print x',
    ...    'up', 'print y',
    ...    'up', 'print foo',
    ...    'continue', # stop debugging
    ...    ''])

    >>> try:
    ...     runner.run(test)
    ... finally:
    ...     sys.stdin = real_stdin
    > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(5)calls_set_trace()
    -> self.f1()
    (Pdb) print y
    1
    (Pdb) step
    --Call--
    > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(7)f1()
    -> def f1(self):
    (Pdb) step
    > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(8)f1()
    -> x = 1
    (Pdb) step
    > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(9)f1()
    -> self.f2()
    (Pdb) step
    --Call--
    > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(11)f2()
    -> def f2(self):
    (Pdb) step
    > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(12)f2()
    -> z = 1
    (Pdb) step
    > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(13)f2()
    -> z = 2
    (Pdb) print z
    1
    (Pdb) up
    > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(9)f1()
    -> self.f2()
    (Pdb) print x
    1
    (Pdb) up
    > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(5)calls_set_trace()
    -> self.f1()
    (Pdb) print y
    1
    (Pdb) up
    > <doctest foo-bär@baz[1]>(1)<module>()
    -> calls_set_trace()
    (Pdb) print foo
    *** NameError: name 'foo' is not defined
    (Pdb) continue
    TestResults(failed=0, attempted=2)
    """

# Doctest-driven test (docstring is the test): exercises unittest
# integration via doctest.DocTestSuite.
def test_DocTestSuite():
    """DocTestSuite creates a unittest test suite from a doctest.

       We create a Suite by providing a module.  A module can be provided
       by passing a module object:

         >>> import unittest
         >>> import test.sample_doctest
         >>> suite = doctest.DocTestSuite(test.sample_doctest)
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=9 errors=0 failures=4>

       We can also supply the module by name:

         >>> suite = doctest.DocTestSuite('test.sample_doctest')
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=9 errors=0 failures=4>

       The module need not contain any doctest examples:

         >>> suite = doctest.DocTestSuite('test.sample_doctest_no_doctests')
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=0 errors=0 failures=0>

       However, if DocTestSuite finds no docstrings, it raises an error:

         >>> try:
         ...     doctest.DocTestSuite('test.sample_doctest_no_docstrings')
         ... except ValueError as e:
         ...     error = e

         >>> print(error.args[1])
         has no docstrings

       You can prevent this error by passing a DocTestFinder instance with
       the `exclude_empty` keyword argument set to False:

         >>> finder = doctest.DocTestFinder(exclude_empty=False)
         >>> suite = doctest.DocTestSuite('test.sample_doctest_no_docstrings',
         ...                              test_finder=finder)
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=0 errors=0 failures=0>

       We can use the current module:

         >>> suite = test.sample_doctest.test_suite()
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=9 errors=0 failures=4>

       We can supply global variables.  If we pass globs, they will be
       used instead of the module globals.  Here we'll pass an empty
       globals, triggering an extra error:

         >>> suite = doctest.DocTestSuite('test.sample_doctest', globs={})
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=9 errors=0 failures=5>

       Alternatively, we can provide extra globals.  Here we'll make an
       error go away by providing an extra global variable:

         >>> suite = doctest.DocTestSuite('test.sample_doctest',
         ...                              extraglobs={'y': 1})
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=9 errors=0 failures=3>

       You can pass option flags.  Here we'll cause an extra error
       by disabling the blank-line feature:

         >>> suite = doctest.DocTestSuite('test.sample_doctest',
         ...                      optionflags=doctest.DONT_ACCEPT_BLANKLINE)
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=9 errors=0 failures=5>

       You can supply setUp and tearDown functions:

         >>> def setUp(t):
         ...     import test.test_doctest
         ...     test.test_doctest.sillySetup = True

         >>> def tearDown(t):
         ...     import test.test_doctest
         ...     del test.test_doctest.sillySetup

       Here, we installed a silly variable that the test expects:

         >>> suite = doctest.DocTestSuite('test.sample_doctest',
         ...                              setUp=setUp, tearDown=tearDown)
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=9 errors=0 failures=3>

       But the tearDown restores sanity:

         >>> import test.test_doctest
         >>> test.test_doctest.sillySetup
         Traceback (most recent call last):
         ...
         AttributeError: 'module' object has no attribute 'sillySetup'

       The setUp and tearDown functions are passed test objects. Here
       we'll use the setUp function to supply the missing variable y:

         >>> def setUp(test):
         ...     test.globs['y'] = 1

         >>> suite = doctest.DocTestSuite('test.sample_doctest', setUp=setUp)
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=9 errors=0 failures=3>

       Here, we didn't need to use a tearDown function because we
       modified the test globals, which are a copy of the
       sample_doctest module dictionary.  The test globals are
       automatically cleared for us after a test.
       """

# Doctest-driven test (docstring is the test): exercises
# doctest.DocFileSuite over the test_doctest*.txt fixture files.
def test_DocFileSuite():
    """We can test tests found in text files using a DocFileSuite.

       We create a suite by providing the names of one or more text
       files that include examples:

         >>> import unittest
         >>> suite = doctest.DocFileSuite('test_doctest.txt',
         ...                              'test_doctest2.txt',
         ...                              'test_doctest4.txt')
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=3 errors=0 failures=3>

       The test files are looked for in the directory containing the
       calling module.  A package keyword argument can be provided to
       specify a different relative location.

         >>> import unittest
         >>> suite = doctest.DocFileSuite('test_doctest.txt',
         ...                              'test_doctest2.txt',
         ...                              'test_doctest4.txt',
         ...                              package='test')
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=3 errors=0 failures=3>

       Support for using a package's __loader__.get_data() is also
       provided.

         >>> import unittest, pkgutil, test
         >>> added_loader = False
         >>> if not hasattr(test, '__loader__'):
         ...     test.__loader__ = pkgutil.get_loader(test)
         ...     added_loader = True
         >>> try:
         ...     suite = doctest.DocFileSuite('test_doctest.txt',
         ...                                  'test_doctest2.txt',
         ...                                  'test_doctest4.txt',
         ...                                  package='test')
         ...     suite.run(unittest.TestResult())
         ... finally:
         ...     if added_loader:
         ...         del test.__loader__
         <unittest.result.TestResult run=3 errors=0 failures=3>

       '/' should be used as a path separator.  It will be converted
       to a native separator at run time:

         >>> suite = doctest.DocFileSuite('../test/test_doctest.txt')
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=1 errors=0 failures=1>

       If DocFileSuite is used from an interactive session, then files
       are resolved relative to the directory of sys.argv[0]:

         >>> import types, os.path, test.test_doctest
         >>> save_argv = sys.argv
         >>> sys.argv = [test.test_doctest.__file__]
         >>> suite = doctest.DocFileSuite('test_doctest.txt',
         ...                              package=types.ModuleType('__main__'))
         >>> sys.argv = save_argv

       By setting `module_relative=False`, os-specific paths may be
       used (including absolute paths and paths relative to the
       working directory):

         >>> # Get the absolute path of the test package.
         >>> test_doctest_path = os.path.abspath(test.test_doctest.__file__)
         >>> test_pkg_path = os.path.split(test_doctest_path)[0]

         >>> # Use it to find the absolute path of test_doctest.txt.
         >>> test_file = os.path.join(test_pkg_path, 'test_doctest.txt')

         >>> suite = doctest.DocFileSuite(test_file, module_relative=False)
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=1 errors=0 failures=1>

       It is an error to specify `package` when `module_relative=False`:

         >>> suite = doctest.DocFileSuite(test_file, module_relative=False,
         ...                              package='test')
         Traceback (most recent call last):
         ValueError: Package may only be specified for module-relative paths.

       You can specify initial global variables:

         >>> suite = doctest.DocFileSuite('test_doctest.txt',
         ...                              'test_doctest2.txt',
         ...                              'test_doctest4.txt',
         ...                              globs={'favorite_color': 'blue'})
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=3 errors=0 failures=2>

       In this case, we supplied a missing favorite color.

       You can provide doctest options:

         >>> suite = doctest.DocFileSuite('test_doctest.txt',
         ...                              'test_doctest2.txt',
         ...                              'test_doctest4.txt',
         ...                         optionflags=doctest.DONT_ACCEPT_BLANKLINE,
         ...                              globs={'favorite_color': 'blue'})
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=3 errors=0 failures=3>

       And, you can provide setUp and tearDown functions:

         >>> def setUp(t):
         ...     import test.test_doctest
         ...     test.test_doctest.sillySetup = True

         >>> def tearDown(t):
         ...     import test.test_doctest
         ...     del test.test_doctest.sillySetup

       Here, we installed a silly variable that the test expects:

         >>> suite = doctest.DocFileSuite('test_doctest.txt',
         ...                              'test_doctest2.txt',
         ...                              'test_doctest4.txt',
         ...                              setUp=setUp, tearDown=tearDown)
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=3 errors=0 failures=2>

       But the tearDown restores sanity:

         >>> import test.test_doctest
         >>> test.test_doctest.sillySetup
         Traceback (most recent call last):
         ...
         AttributeError: 'module' object has no attribute 'sillySetup'

       The setUp and tearDown functions are passed test objects.
       Here, we'll use a setUp function to set the favorite color in
       test_doctest.txt:

         >>> def setUp(test):
         ...     test.globs['favorite_color'] = 'blue'

         >>> suite = doctest.DocFileSuite('test_doctest.txt', setUp=setUp)
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=1 errors=0 failures=0>

       Here, we didn't need to use a tearDown function because we
       modified the test globals.  The test globals are
       automatically cleared for us after a test.

       Tests in a file run using `DocFileSuite` can also access the
       `__file__` global, which is set to the name of the file
       containing the tests:

         >>> suite = doctest.DocFileSuite('test_doctest3.txt')
         >>> suite.run(unittest.TestResult())
         <unittest.result.TestResult run=1 errors=0 failures=0>

       If the tests contain non-ASCII characters, we have to specify which
       encoding the file is encoded with.
We do so by using the `encoding` parameter: >>> suite = doctest.DocFileSuite('test_doctest.txt', ... 'test_doctest2.txt', ... 'test_doctest4.txt', ... encoding='utf-8') >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=3 errors=0 failures=2> """ def test_trailing_space_in_test(): """ Trailing spaces in expected output are significant: >>> x, y = 'foo', '' >>> print x, y foo \n """ def test_unittest_reportflags(): """Default unittest reporting flags can be set to control reporting Here, we'll set the REPORT_ONLY_FIRST_FAILURE option so we see only the first failure of each test. First, we'll look at the output without the flag. The file test_doctest.txt file has two tests. They both fail if blank lines are disabled: >>> suite = doctest.DocFileSuite('test_doctest.txt', ... optionflags=doctest.DONT_ACCEPT_BLANKLINE) >>> import unittest >>> result = suite.run(unittest.TestResult()) >>> print result.failures[0][1] # doctest: +ELLIPSIS Traceback ... Failed example: favorite_color ... Failed example: if 1: ... Note that we see both failures displayed. >>> old = doctest.set_unittest_reportflags( ... doctest.REPORT_ONLY_FIRST_FAILURE) Now, when we run the test: >>> result = suite.run(unittest.TestResult()) >>> print result.failures[0][1] # doctest: +ELLIPSIS Traceback ... Failed example: favorite_color Exception raised: ... NameError: name 'favorite_color' is not defined <BLANKLINE> <BLANKLINE> We get only the first failure. If we give any reporting options when we set up the tests, however: >>> suite = doctest.DocFileSuite('test_doctest.txt', ... optionflags=doctest.DONT_ACCEPT_BLANKLINE | doctest.REPORT_NDIFF) Then the default eporting options are ignored: >>> result = suite.run(unittest.TestResult()) >>> print result.failures[0][1] # doctest: +ELLIPSIS Traceback ... Failed example: favorite_color ... 
      Failed example:
          if 1:
             print 'a'
             print
             print 'b'
      Differences (ndiff with -expected +actual):
            a
          - <BLANKLINE>
          + b
          <BLANKLINE>
      <BLANKLINE>

    Test runners can restore the formatting flags after they run:

      >>> ignored = doctest.set_unittest_reportflags(old)

    """

# Doctest-driven test (docstring is the test): exercises doctest.testfile().
def test_testfile(): r"""
Tests for the `testfile()` function.  This function runs all the
doctest examples in a given file.  In its simple invocation, it is
called with the name of a file, which is taken to be relative to the
calling module.  The return value is (#failures, #tests).

We don't want `-v` in sys.argv for these tests.

    >>> save_argv = sys.argv
    >>> if '-v' in sys.argv:
    ...     sys.argv = [arg for arg in save_argv if arg != '-v']

    >>> doctest.testfile('test_doctest.txt') # doctest: +ELLIPSIS
    **********************************************************************
    File "...", line 6, in test_doctest.txt
    Failed example:
        favorite_color
    Exception raised:
        ...
        NameError: name 'favorite_color' is not defined
    **********************************************************************
    1 items had failures:
       1 of   2 in test_doctest.txt
    ***Test Failed*** 1 failures.
    TestResults(failed=1, attempted=2)
    >>> doctest.master = None  # Reset master.

(Note: we'll be clearing doctest.master after each call to
`doctest.testfile`, to suppress warnings about multiple tests with the
same name.)

Globals may be specified with the `globs` and `extraglobs` parameters:

    >>> globs = {'favorite_color': 'blue'}
    >>> doctest.testfile('test_doctest.txt', globs=globs)
    TestResults(failed=0, attempted=2)
    >>> doctest.master = None  # Reset master.

    >>> extraglobs = {'favorite_color': 'red'}
    >>> doctest.testfile('test_doctest.txt', globs=globs,
    ...                  extraglobs=extraglobs) # doctest: +ELLIPSIS
    **********************************************************************
    File "...", line 6, in test_doctest.txt
    Failed example:
        favorite_color
    Expected:
        'blue'
    Got:
        'red'
    **********************************************************************
    1 items had failures:
       1 of   2 in test_doctest.txt
    ***Test Failed*** 1 failures.
    TestResults(failed=1, attempted=2)
    >>> doctest.master = None  # Reset master.

The file may be made relative to a given module or package, using the
optional `module_relative` parameter:

    >>> doctest.testfile('test_doctest.txt', globs=globs,
    ...                  module_relative='test')
    TestResults(failed=0, attempted=2)
    >>> doctest.master = None  # Reset master.

Verbosity can be increased with the optional `verbose` parameter:

    >>> doctest.testfile('test_doctest.txt', globs=globs, verbose=True)
    Trying:
        favorite_color
    Expecting:
        'blue'
    ok
    Trying:
        if 1:
           print 'a'
           print
           print 'b'
    Expecting:
        a
        <BLANKLINE>
        b
    ok
    1 items passed all tests:
       2 tests in test_doctest.txt
    2 tests in 1 items.
    2 passed and 0 failed.
    Test passed.
    TestResults(failed=0, attempted=2)
    >>> doctest.master = None  # Reset master.

The name of the test may be specified with the optional `name`
parameter:

    >>> doctest.testfile('test_doctest.txt', name='newname')
    ... # doctest: +ELLIPSIS
    **********************************************************************
    File "...", line 6, in newname
    ...
    TestResults(failed=1, attempted=2)
    >>> doctest.master = None  # Reset master.

The summary report may be suppressed with the optional `report`
parameter:

    >>> doctest.testfile('test_doctest.txt', report=False)
    ... # doctest: +ELLIPSIS
    **********************************************************************
    File "...", line 6, in test_doctest.txt
    Failed example:
        favorite_color
    Exception raised:
        ...
        NameError: name 'favorite_color' is not defined
    TestResults(failed=1, attempted=2)
    >>> doctest.master = None  # Reset master.

The optional keyword argument `raise_on_error` can be used to raise an
exception on the first error (which may be useful for postmortem
debugging):

    >>> doctest.testfile('test_doctest.txt', raise_on_error=True)
    ... # doctest: +ELLIPSIS
    Traceback (most recent call last):
    UnexpectedException: ...
    >>> doctest.master = None  # Reset master.

If the tests contain non-ASCII characters, the tests might fail, since
it's unknown which encoding is used. The encoding can be specified
using the optional keyword argument `encoding`:

    >>> doctest.testfile('test_doctest4.txt') # doctest: +ELLIPSIS
    **********************************************************************
    File "...", line 7, in test_doctest4.txt
    Failed example:
        u'...'
    Expected:
        u'f\xf6\xf6'
    Got:
        u'f\xc3\xb6\xc3\xb6'
    **********************************************************************
    ...
    **********************************************************************
    1 items had failures:
       2 of   4 in test_doctest4.txt
    ***Test Failed*** 2 failures.
    TestResults(failed=2, attempted=4)
    >>> doctest.master = None  # Reset master.

    >>> doctest.testfile('test_doctest4.txt', encoding='utf-8')
    TestResults(failed=0, attempted=4)
    >>> doctest.master = None  # Reset master.

Switch the module encoding to 'utf-8' to test the verbose output without
bothering with the current sys.stdout encoding.

    >>> doctest._encoding, saved_encoding = 'utf-8', doctest._encoding
    >>> doctest.testfile('test_doctest4.txt', encoding='utf-8', verbose=True)
    Trying:
        u'föö'
    Expecting:
        u'f\xf6\xf6'
    ok
    Trying:
        u'bąr'
    Expecting:
        u'b\u0105r'
    ok
    Trying:
        'föö'
    Expecting:
        'f\xc3\xb6\xc3\xb6'
    ok
    Trying:
        'bąr'
    Expecting:
        'b\xc4\x85r'
    ok
    1 items passed all tests:
       4 tests in test_doctest4.txt
    4 tests in 1 items.
    4 passed and 0 failed.
    Test passed.
    TestResults(failed=0, attempted=4)
    >>> doctest._encoding = saved_encoding
    >>> doctest.master = None  # Reset master.

    >>> sys.argv = save_argv
"""

# Doctest-driven test (docstring is the test): issue 8473, universal
# newline handling in doctest.testfile.
def test_lineendings(): r"""
*nix systems use \n line endings, while Windows systems use \r\n.
Python handles this using universal newline mode for reading files. Let's make sure doctest does so (issue 8473) by creating temporary test files using each of the two line disciplines. One of the two will be the "wrong" one for the platform the test is run on. Windows line endings first: >>> import tempfile, os >>> fn = tempfile.mktemp() >>> with open(fn, 'wb') as f: ... f.write('Test:\r\n\r\n >>> x = 1 + 1\r\n\r\nDone.\r\n') >>> doctest.testfile(fn, False) TestResults(failed=0, attempted=1) >>> os.remove(fn) And now *nix line endings: >>> fn = tempfile.mktemp() >>> with open(fn, 'wb') as f: ... f.write('Test:\n\n >>> x = 1 + 1\n\nDone.\n') >>> doctest.testfile(fn, False) TestResults(failed=0, attempted=1) >>> os.remove(fn) """ # old_test1, ... used to live in doctest.py, but cluttered it. Note # that these use the deprecated doctest.Tester, so should go away (or # be rewritten) someday. def old_test1(): r""" >>> from doctest import Tester >>> t = Tester(globs={'x': 42}, verbose=0) >>> t.runstring(r''' ... >>> x = x * 2 ... >>> print x ... 42 ... ''', 'XYZ') ********************************************************************** Line 3, in XYZ Failed example: print x Expected: 42 Got: 84 TestResults(failed=1, attempted=2) >>> t.runstring(">>> x = x * 2\n>>> print x\n84\n", 'example2') TestResults(failed=0, attempted=2) >>> t.summarize() ********************************************************************** 1 items had failures: 1 of 2 in XYZ ***Test Failed*** 1 failures. TestResults(failed=1, attempted=4) >>> t.summarize(verbose=1) 1 items passed all tests: 2 tests in example2 ********************************************************************** 1 items had failures: 1 of 2 in XYZ 4 tests in 2 items. 3 passed and 1 failed. ***Test Failed*** 1 failures. TestResults(failed=1, attempted=4) """ def old_test2(): r""" >>> from doctest import Tester >>> t = Tester(globs={}, verbose=1) >>> test = r''' ... # just an example ... >>> x = 1 + 2 ... >>> x ... 3 ... 
''' >>> t.runstring(test, "Example") Running string Example Trying: x = 1 + 2 Expecting nothing ok Trying: x Expecting: 3 ok 0 of 2 examples failed in string Example TestResults(failed=0, attempted=2) """ def old_test3(): r""" >>> from doctest import Tester >>> t = Tester(globs={}, verbose=0) >>> def _f(): ... '''Trivial docstring example. ... >>> assert 2 == 2 ... ''' ... return 32 ... >>> t.rundoc(_f) # expect 0 failures in 1 example TestResults(failed=0, attempted=1) """ def old_test4(): """ >>> import types >>> m1 = types.ModuleType('_m1') >>> m2 = types.ModuleType('_m2') >>> test_data = \""" ... def _f(): ... '''>>> assert 1 == 1 ... ''' ... def g(): ... '''>>> assert 2 != 1 ... ''' ... class H: ... '''>>> assert 2 > 1 ... ''' ... def bar(self): ... '''>>> assert 1 < 2 ... ''' ... \""" >>> exec test_data in m1.__dict__ >>> exec test_data in m2.__dict__ >>> m1.__dict__.update({"f2": m2._f, "g2": m2.g, "h2": m2.H}) Tests that objects outside m1 are excluded: >>> from doctest import Tester >>> t = Tester(globs={}, verbose=0) >>> t.rundict(m1.__dict__, "rundict_test", m1) # f2 and g2 and h2 skipped TestResults(failed=0, attempted=4) Once more, not excluding stuff outside m1: >>> t = Tester(globs={}, verbose=0) >>> t.rundict(m1.__dict__, "rundict_test_pvt") # None are skipped. TestResults(failed=0, attempted=8) The exclusion of objects from outside the designated module is meant to be invoked automagically by testmod. >>> doctest.testmod(m1, verbose=False) TestResults(failed=0, attempted=4) """ ###################################################################### ## Main ###################################################################### def test_main(): # Check the doctest cases in doctest itself: test_support.run_doctest(doctest, verbosity=True) from test import test_doctest # Ignore all warnings about the use of class Tester in this module. 
deprecations = [] if __debug__: deprecations.append(("class Tester is deprecated", DeprecationWarning)) if sys.py3kwarning: deprecations += [("backquote not supported", SyntaxWarning), ("execfile.. not supported", DeprecationWarning)] with test_support.check_warnings(*deprecations): # Check the doctest cases defined here: test_support.run_doctest(test_doctest, verbosity=True) import sys def test_coverage(coverdir): trace = test_support.import_module('trace') tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,], trace=0, count=1) tracer.run('reload(doctest); test_main()') r = tracer.results() print 'Writing coverage results...' r.write_results(show_missing=True, summary=True, coverdir=coverdir) if __name__ == '__main__': if '-c' in sys.argv: test_coverage('/tmp/doctest.cover') else: test_main()
mit
HiroIshikawa/21playground
microblog/flask/lib/python3.5/site-packages/coverage/backward.py
26
4909
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Add things to old Pythons so I can pretend they are newer."""

# This file does lots of tricky stuff, so disable a bunch of pylint warnings.
# pylint: disable=redefined-builtin
# pylint: disable=unused-import
# pylint: disable=no-name-in-module

import sys

from coverage import env


# Pythons 2 and 3 differ on where to get StringIO.
try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO

# In py3, ConfigParser was renamed to the more-standard configparser
try:
    import configparser
except ImportError:
    import ConfigParser as configparser

# What's a string called?
try:
    string_class = basestring
except NameError:
    string_class = str

# What's a Unicode string called?
try:
    unicode_class = unicode
except NameError:
    unicode_class = str

# Where do pickles come from?
try:
    import cPickle as pickle
except ImportError:
    import pickle

# range or xrange?
try:
    range = xrange
except NameError:
    range = range

# shlex.quote is new, but there's an undocumented implementation in "pipes",
# who knew!?
try:
    from shlex import quote as shlex_quote
except ImportError:
    # Useful function, available under a different (undocumented) name
    # in Python versions earlier than 3.3.
    from pipes import quote as shlex_quote

# A function to iterate listlessly over a dict's items.
# (Probing {}.iteritems once at import time picks the right variant for this
# interpreter, instead of paying an attribute check per call.)
try:
    {}.iteritems
except AttributeError:
    def iitems(d):
        """Produce the items from dict `d`."""
        return d.items()
else:
    def iitems(d):
        """Produce the items from dict `d`."""
        return d.iteritems()

# Getting the `next` function from an iterator is different in 2 and 3.
try:
    iter([]).next
except AttributeError:
    def iternext(seq):
        """Get the `next` function for iterating over `seq`."""
        return iter(seq).__next__
else:
    def iternext(seq):
        """Get the `next` function for iterating over `seq`."""
        return iter(seq).next

# Python 3.x is picky about bytes and strings, so provide methods to
# get them right, and make them no-ops in 2.x
if env.PY3:
    def to_bytes(s):
        """Convert string `s` to bytes."""
        return s.encode('utf8')

    def binary_bytes(byte_values):
        """Produce a byte string with the ints from `byte_values`."""
        return bytes(byte_values)

    def byte_to_int(byte_value):
        """Turn an element of a bytes object into an int."""
        return byte_value

    def bytes_to_ints(bytes_value):
        """Turn a bytes object into a sequence of ints."""
        # In Python 3, iterating bytes gives ints.
        return bytes_value

else:
    def to_bytes(s):
        """Convert string `s` to bytes (no-op in 2.x)."""
        return s

    def binary_bytes(byte_values):
        """Produce a byte string with the ints from `byte_values`."""
        return "".join(chr(b) for b in byte_values)

    def byte_to_int(byte_value):
        """Turn an element of a bytes object into an int."""
        return ord(byte_value)

    def bytes_to_ints(bytes_value):
        """Turn a bytes object into a sequence of ints."""
        for byte in bytes_value:
            yield ord(byte)

try:
    # In Python 2.x, the builtins were in __builtin__
    BUILTINS = sys.modules['__builtin__']
except KeyError:
    # In Python 3.x, they're in builtins
    BUILTINS = sys.modules['builtins']


# imp was deprecated in Python 3.3
try:
    import importlib
    import importlib.util
    imp = None
except ImportError:
    importlib = None

# We only want to use importlib if it has everything we need.
try:
    importlib_util_find_spec = importlib.util.find_spec
except Exception:
    import imp
    importlib_util_find_spec = None

# What is the .pyc magic number for this version of Python?
try:
    PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER
except AttributeError:
    PYC_MAGIC_NUMBER = imp.get_magic()


def import_local_file(modname, modfile=None):
    """Import a local file as a module.

    Opens a file in the current directory named `modname`.py, imports it
    as `modname`, and returns the module object.  `modfile` is the file to
    import if it isn't in the current directory.

    """
    try:
        from importlib.machinery import SourceFileLoader
    except ImportError:
        SourceFileLoader = None

    if modfile is None:
        modfile = modname + '.py'
    if SourceFileLoader:
        mod = SourceFileLoader(modname, modfile).load_module()
    else:
        # Fall back to the deprecated `imp` machinery on old interpreters.
        for suff in imp.get_suffixes():  # pragma: part covered
            if suff[0] == '.py':
                break

        with open(modfile, 'r') as f:  # pylint: disable=undefined-loop-variable
            mod = imp.load_module(modname, f, modfile, suff)

    return mod
mit
Jollytown/Garuda
server/garuda/lib/python2.7/site-packages/django/utils/dateformat.py
115
10703
""" PHP date() style date formatting See http://www.php.net/date for format strings Usage: >>> import datetime >>> d = datetime.datetime.now() >>> df = DateFormat(d) >>> print(df.format('jS F Y H:i')) 7th October 2003 11:39 >>> """ from __future__ import unicode_literals import re import time import calendar import datetime from django.utils.dates import MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR from django.utils.translation import ugettext as _ from django.utils.encoding import force_text from django.utils import six from django.utils.timezone import get_default_timezone, is_aware, is_naive re_formatchars = re.compile(r'(?<!\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])') re_escaped = re.compile(r'\\(.)') class Formatter(object): def format(self, formatstr): pieces = [] for i, piece in enumerate(re_formatchars.split(force_text(formatstr))): if i % 2: pieces.append(force_text(getattr(self, piece)())) elif piece: pieces.append(re_escaped.sub(r'\1', piece)) return ''.join(pieces) class TimeFormat(Formatter): def __init__(self, obj): self.data = obj self.timezone = None # We only support timezone when formatting datetime objects, # not date objects (timezone information not appropriate), # or time objects (against established django policy). if isinstance(obj, datetime.datetime): if is_naive(obj): self.timezone = get_default_timezone() else: self.timezone = obj.tzinfo def a(self): "'a.m.' or 'p.m.'" if self.data.hour > 11: return _('p.m.') return _('a.m.') def A(self): "'AM' or 'PM'" if self.data.hour > 11: return _('PM') return _('AM') def B(self): "Swatch Internet time" raise NotImplementedError('may be implemented in a future release') def e(self): """ Timezone name. If timezone information is not available, this method returns an empty string. 
""" if not self.timezone: return "" try: if hasattr(self.data, 'tzinfo') and self.data.tzinfo: # Have to use tzinfo.tzname and not datetime.tzname # because datatime.tzname does not expect Unicode return self.data.tzinfo.tzname(self.data) or "" except NotImplementedError: pass return "" def f(self): """ Time, in 12-hour hours and minutes, with minutes left off if they're zero. Examples: '1', '1:30', '2:05', '2' Proprietary extension. """ if self.data.minute == 0: return self.g() return '%s:%s' % (self.g(), self.i()) def g(self): "Hour, 12-hour format without leading zeros; i.e. '1' to '12'" if self.data.hour == 0: return 12 if self.data.hour > 12: return self.data.hour - 12 return self.data.hour def G(self): "Hour, 24-hour format without leading zeros; i.e. '0' to '23'" return self.data.hour def h(self): "Hour, 12-hour format; i.e. '01' to '12'" return '%02d' % self.g() def H(self): "Hour, 24-hour format; i.e. '00' to '23'" return '%02d' % self.G() def i(self): "Minutes; i.e. '00' to '59'" return '%02d' % self.data.minute def O(self): """ Difference to Greenwich time in hours; e.g. '+0200', '-0430'. If timezone information is not available, this method returns an empty string. """ if not self.timezone: return "" seconds = self.Z() sign = '-' if seconds < 0 else '+' seconds = abs(seconds) return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60) def P(self): """ Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off if they're zero and the strings 'midnight' and 'noon' if appropriate. Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.' Proprietary extension. """ if self.data.minute == 0 and self.data.hour == 0: return _('midnight') if self.data.minute == 0 and self.data.hour == 12: return _('noon') return '%s %s' % (self.f(), self.a()) def s(self): "Seconds; i.e. '00' to '59'" return '%02d' % self.data.second def T(self): """ Time zone of this machine; e.g. 'EST' or 'MDT'. 
If timezone information is not available, this method returns an empty string. """ if not self.timezone: return "" name = self.timezone.tzname(self.data) if self.timezone else None if name is None: name = self.format('O') return six.text_type(name) def u(self): "Microseconds; i.e. '000000' to '999999'" return '%06d' % self.data.microsecond def Z(self): """ Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for timezones west of UTC is always negative, and for those east of UTC is always positive. If timezone information is not available, this method returns an empty string. """ if not self.timezone: return "" offset = self.timezone.utcoffset(self.data) # `offset` is a datetime.timedelta. For negative values (to the west of # UTC) only days can be negative (days=-1) and seconds are always # positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0) # Positive offsets have days=0 return offset.days * 86400 + offset.seconds class DateFormat(TimeFormat): year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334] def b(self): "Month, textual, 3 letters, lowercase; e.g. 'jan'" return MONTHS_3[self.data.month] def c(self): """ ISO 8601 Format Example : '2008-01-02T10:30:00.000123' """ return self.data.isoformat() def d(self): "Day of the month, 2 digits with leading zeros; i.e. '01' to '31'" return '%02d' % self.data.day def D(self): "Day of the week, textual, 3 letters; e.g. 'Fri'" return WEEKDAYS_ABBR[self.data.weekday()] def E(self): "Alternative month names as required by some locales. Proprietary extension." return MONTHS_ALT[self.data.month] def F(self): "Month, textual, long; e.g. 'January'" return MONTHS[self.data.month] def I(self): "'1' if Daylight Savings Time, '0' otherwise." if self.timezone and self.timezone.dst(self.data): return '1' else: return '0' def j(self): "Day of the month without leading zeros; i.e. '1' to '31'" return self.data.day def l(self): "Day of the week, textual, long; e.g. 
'Friday'" return WEEKDAYS[self.data.weekday()] def L(self): "Boolean for whether it is a leap year; i.e. True or False" return calendar.isleap(self.data.year) def m(self): "Month; i.e. '01' to '12'" return '%02d' % self.data.month def M(self): "Month, textual, 3 letters; e.g. 'Jan'" return MONTHS_3[self.data.month].title() def n(self): "Month without leading zeros; i.e. '1' to '12'" return self.data.month def N(self): "Month abbreviation in Associated Press style. Proprietary extension." return MONTHS_AP[self.data.month] def o(self): "ISO 8601 year number matching the ISO week number (W)" return self.data.isocalendar()[0] def r(self): "RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'" return self.format('D, j M Y H:i:s O') def S(self): "English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'" if self.data.day in (11, 12, 13): # Special case return 'th' last = self.data.day % 10 if last == 1: return 'st' if last == 2: return 'nd' if last == 3: return 'rd' return 'th' def t(self): "Number of days in the given month; i.e. '28' to '31'" return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1] def U(self): "Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)" if isinstance(self.data, datetime.datetime) and is_aware(self.data): return int(calendar.timegm(self.data.utctimetuple())) else: return int(time.mktime(self.data.timetuple())) def w(self): "Day of the week, numeric, i.e. 
'0' (Sunday) to '6' (Saturday)" return (self.data.weekday() + 1) % 7 def W(self): "ISO-8601 week number of year, weeks starting on Monday" # Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt week_number = None jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1 weekday = self.data.weekday() + 1 day_of_year = self.z() if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4: if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year - 1)): week_number = 53 else: week_number = 52 else: if calendar.isleap(self.data.year): i = 366 else: i = 365 if (i - day_of_year) < (4 - weekday): week_number = 1 else: j = day_of_year + (7 - weekday) + (jan1_weekday - 1) week_number = j // 7 if jan1_weekday > 4: week_number -= 1 return week_number def y(self): "Year, 2 digits; e.g. '99'" return six.text_type(self.data.year)[2:] def Y(self): "Year, 4 digits; e.g. '1999'" return self.data.year def z(self): "Day of the year; i.e. '0' to '365'" doy = self.year_days[self.data.month] + self.data.day if self.L() and self.data.month > 2: doy += 1 return doy def format(value, format_string): "Convenience function" df = DateFormat(value) return df.format(format_string) def time_format(value, format_string): "Convenience function" tf = TimeFormat(value) return tf.format(format_string)
mit
affo/nova
nova/volume/encryptors/base.py
61
1949
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_log import log as logging import six from nova import keymgr LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class VolumeEncryptor(object): """Base class to support encrypted volumes. A VolumeEncryptor provides hooks for attaching and detaching volumes, which are called immediately prior to attaching the volume to an instance and immediately following detaching the volume from an instance. This class performs no actions for either hook. """ def __init__(self, connection_info, **kwargs): self._key_manager = keymgr.API() self.encryption_key_id = kwargs.get('encryption_key_id') def _get_key(self, context): """Retrieves the encryption key for the specified volume. :param: the connection information used to attach the volume """ return self._key_manager.get_key(context, self.encryption_key_id) @abc.abstractmethod def attach_volume(self, context, **kwargs): """Hook called immediately prior to attaching a volume to an instance. """ pass @abc.abstractmethod def detach_volume(self, **kwargs): """Hook called immediately after detaching a volume from an instance. """ pass
apache-2.0
opendaylight/netvirt
resources/tools/odltools/odltools/mdsal/tests/test_neutron.py
1
1436
# Copyright 2018 Red Hat, Inc. and others. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import unittest from odltools import logg from odltools.mdsal.models.neutron import Neutron from odltools.mdsal.models.neutron import neutron from odltools.mdsal.models.model import Model from odltools.mdsal import tests class TestNeutron(unittest.TestCase): def setUp(self): logg.Logger(logging.INFO, logging.INFO) args = tests.Args(path=tests.get_resources_path()) self.neutron = neutron(Model.CONFIG, args) def test_get_objects_by_key(self): d = self.neutron.get_objects_by_key(obj=Neutron.NETWORKS) self.assertIsNotNone(d.get('bd8db3a8-2b30-4083-a8b3-b3fd46401142')) d = self.neutron.get_objects_by_key(obj=Neutron.PORTS) self.assertIsNotNone(d.get('8e3c262e-7b45-4222-ac4e-528db75e5516')) if __name__ == '__main__': unittest.main()
epl-1.0
TyRoXx/cdm
original_sources/boost_1_59_0/tools/build/test/project_test3.py
44
4117
#!/usr/bin/python # Copyright 2002, 2003 Dave Abrahams # Copyright 2002, 2003, 2004, 2006 Vladimir Prus # Distributed under the Boost Software License, Version 1.0. # (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) import BoostBuild import os t = BoostBuild.Tester(translate_suffixes=0) # First check some startup. t.set_tree("project-test3") os.remove("jamroot.jam") t.run_build_system(status=1) t.expect_output_lines("error: Could not find parent for project at '.'\n" "error: Did not find Jamfile.jam or Jamroot.jam in any parent directory.") t.set_tree("project-test3") t.run_build_system() t.expect_addition("bin/$toolset/debug/a.obj") t.expect_content("bin/$toolset/debug/a.obj", """\ $toolset/debug a.cpp """) t.expect_addition("bin/$toolset/debug/a.exe") t.expect_content("bin/$toolset/debug/a.exe", "$toolset/debug\n" + "bin/$toolset/debug/a.obj lib/bin/$toolset/debug/b.obj " + "lib2/bin/$toolset/debug/c.obj lib2/bin/$toolset/debug/d.obj " + "lib2/helper/bin/$toolset/debug/e.obj " + "lib3/bin/$toolset/debug/f.obj\n" ) t.expect_addition("lib/bin/$toolset/debug/b.obj") t.expect_content("lib/bin/$toolset/debug/b.obj", """\ $toolset/debug lib/b.cpp """) t.expect_addition("lib/bin/$toolset/debug/m.exe") t.expect_content("lib/bin/$toolset/debug/m.exe", """\ $toolset/debug lib/bin/$toolset/debug/b.obj lib2/bin/$toolset/debug/c.obj """) t.expect_addition("lib2/bin/$toolset/debug/c.obj") t.expect_content("lib2/bin/$toolset/debug/c.obj", """\ $toolset/debug lib2/c.cpp """) t.expect_addition("lib2/bin/$toolset/debug/d.obj") t.expect_content("lib2/bin/$toolset/debug/d.obj", """\ $toolset/debug lib2/d.cpp """) t.expect_addition("lib2/bin/$toolset/debug/l.exe") t.expect_content("lib2/bin/$toolset/debug/l.exe", """\ $toolset/debug lib2/bin/$toolset/debug/c.obj bin/$toolset/debug/a.obj """) t.expect_addition("lib2/helper/bin/$toolset/debug/e.obj") t.expect_content("lib2/helper/bin/$toolset/debug/e.obj", """\ $toolset/debug lib2/helper/e.cpp 
""") t.expect_addition("lib3/bin/$toolset/debug/f.obj") t.expect_content("lib3/bin/$toolset/debug/f.obj", """\ $toolset/debug lib3/f.cpp lib2/helper/bin/$toolset/debug/e.obj """) t.touch("a.cpp") t.run_build_system() t.expect_touch(["bin/$toolset/debug/a.obj", "bin/$toolset/debug/a.exe", "lib2/bin/$toolset/debug/l.exe"]) t.run_build_system(["release", "optimization=off,speed"]) t.expect_addition(["bin/$toolset/release/a.exe", "bin/$toolset/release/a.obj", "bin/$toolset/release/optimization-off/a.exe", "bin/$toolset/release/optimization-off/a.obj"]) t.run_build_system(["--clean-all"]) t.expect_removal(["bin/$toolset/debug/a.obj", "bin/$toolset/debug/a.exe", "lib/bin/$toolset/debug/b.obj", "lib/bin/$toolset/debug/m.exe", "lib2/bin/$toolset/debug/c.obj", "lib2/bin/$toolset/debug/d.obj", "lib2/bin/$toolset/debug/l.exe", "lib3/bin/$toolset/debug/f.obj"]) # Now test target ids in command line. t.set_tree("project-test3") t.run_build_system(["lib//b.obj"]) t.expect_addition("lib/bin/$toolset/debug/b.obj") t.expect_nothing_more() t.run_build_system(["--clean", "lib//b.obj"]) t.expect_removal("lib/bin/$toolset/debug/b.obj") t.expect_nothing_more() t.run_build_system(["lib//b.obj"]) t.expect_addition("lib/bin/$toolset/debug/b.obj") t.expect_nothing_more() t.run_build_system(["release", "lib2/helper//e.obj", "/lib3//f.obj"]) t.expect_addition("lib2/helper/bin/$toolset/release/e.obj") t.expect_addition("lib3/bin/$toolset/release/f.obj") t.expect_nothing_more() # Test project ids in command line work as well. t.set_tree("project-test3") t.run_build_system(["/lib2"]) t.expect_addition("lib2/bin/$toolset/debug/" * BoostBuild.List("c.obj d.obj l.exe")) t.expect_addition("bin/$toolset/debug/a.obj") t.expect_nothing_more() t.run_build_system(["lib"]) t.expect_addition("lib/bin/$toolset/debug/" * BoostBuild.List("b.obj m.exe")) t.expect_nothing_more() t.cleanup()
mit
veger/ansible
lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py
37
9318
#!/usr/bin/python # # This is a free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This Ansible library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this library. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ec2_customer_gateway short_description: Manage an AWS customer gateway description: - Manage an AWS customer gateway version_added: "2.2" author: Michael Baydoun (@MichaelBaydoun) requirements: [ botocore, boto3 ] notes: - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources. - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details. options: bgp_asn: description: - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when state=present. ip_address: description: - Internet-routable IP address for customers gateway, must be a static address. required: true name: description: - Name of the customer gateway. required: true routing: description: - The type of routing. 
choices: ['static', 'dynamic'] default: dynamic version_added: '2.4' state: description: - Create or terminate the Customer Gateway. default: present choices: [ 'present', 'absent' ] extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' # Create Customer Gateway - ec2_customer_gateway: bgp_asn: 12345 ip_address: 1.2.3.4 name: IndianapolisOffice region: us-east-1 register: cgw # Delete Customer Gateway - ec2_customer_gateway: ip_address: 1.2.3.4 name: IndianapolisOffice state: absent region: us-east-1 register: cgw ''' RETURN = ''' gateway.customer_gateways: description: details about the gateway that was created. returned: success type: complex contains: bgp_asn: description: The Border Gateway Autonomous System Number. returned: when exists and gateway is available. sample: 65123 type: string customer_gateway_id: description: gateway id assigned by amazon. returned: when exists and gateway is available. sample: cgw-cb6386a2 type: string ip_address: description: ip address of your gateway device. returned: when exists and gateway is available. sample: 1.2.3.4 type: string state: description: state of gateway. returned: when gateway exists and is available. state: available type: string tags: description: any tags on the gateway. returned: when gateway exists and is available, and when tags exist. state: available type: string type: description: encryption type. returned: when gateway exists and is available. 
sample: ipsec.1 type: string ''' try: from botocore.exceptions import ClientError HAS_BOTOCORE = True except ImportError: HAS_BOTOCORE = False try: import boto3 HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ec2 import (boto3_conn, AWSRetry, camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info) class Ec2CustomerGatewayManager: def __init__(self, module): self.module = module try: region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) if not region: module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") self.ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) except ClientError as e: module.fail_json(msg=e.message) @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState']) def ensure_cgw_absent(self, gw_id): response = self.ec2.delete_customer_gateway( DryRun=False, CustomerGatewayId=gw_id ) return response def ensure_cgw_present(self, bgp_asn, ip_address): if not bgp_asn: bgp_asn = 65000 response = self.ec2.create_customer_gateway( DryRun=False, Type='ipsec.1', PublicIp=ip_address, BgpAsn=bgp_asn, ) return response def tag_cgw_name(self, gw_id, name): response = self.ec2.create_tags( DryRun=False, Resources=[ gw_id, ], Tags=[ { 'Key': 'Name', 'Value': name }, ] ) return response def describe_gateways(self, ip_address): response = self.ec2.describe_customer_gateways( DryRun=False, Filters=[ { 'Name': 'state', 'Values': [ 'available', ] }, { 'Name': 'ip-address', 'Values': [ ip_address, ] } ] ) return response def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( bgp_asn=dict(required=False, type='int'), ip_address=dict(required=True), name=dict(required=True), routing=dict(default='dynamic', choices=['dynamic', 
'static']), state=dict(default='present', choices=['present', 'absent']), ) ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=[ ('routing', 'dynamic', ['bgp_asn']) ] ) if not HAS_BOTOCORE: module.fail_json(msg='botocore is required.') if not HAS_BOTO3: module.fail_json(msg='boto3 is required.') gw_mgr = Ec2CustomerGatewayManager(module) name = module.params.get('name') existing = gw_mgr.describe_gateways(module.params['ip_address']) results = dict(changed=False) if module.params['state'] == 'present': if existing['CustomerGateways']: existing['CustomerGateway'] = existing['CustomerGateways'][0] results['gateway'] = existing if existing['CustomerGateway']['Tags']: tag_array = existing['CustomerGateway']['Tags'] for key, value in enumerate(tag_array): if value['Key'] == 'Name': current_name = value['Value'] if current_name != name: results['name'] = gw_mgr.tag_cgw_name( results['gateway']['CustomerGateway']['CustomerGatewayId'], module.params['name'], ) results['changed'] = True else: if not module.check_mode: results['gateway'] = gw_mgr.ensure_cgw_present( module.params['bgp_asn'], module.params['ip_address'], ) results['name'] = gw_mgr.tag_cgw_name( results['gateway']['CustomerGateway']['CustomerGatewayId'], module.params['name'], ) results['changed'] = True elif module.params['state'] == 'absent': if existing['CustomerGateways']: existing['CustomerGateway'] = existing['CustomerGateways'][0] results['gateway'] = existing if not module.check_mode: results['gateway'] = gw_mgr.ensure_cgw_absent( existing['CustomerGateway']['CustomerGatewayId'] ) results['changed'] = True pretty_results = camel_dict_to_snake_dict(results) module.exit_json(**pretty_results) if __name__ == '__main__': main()
gpl-3.0
matsprea/omim
tools/user_code_coverage.py
53
1598
import os import json import sys if len(sys.argv) < 3: print "USAGE: " + sys.argv[0] + " [username] [htmlfile]" exit() USERNAME = sys.argv[1] HTMLFILE = sys.argv[1] if __name__ == "__main__": os.system('git log --pretty="%H" --author="'+USERNAME+'" | while read commit_hash; do git show --oneline --name-only $commit_hash | tail -n+2; done | sort | uniq > /tmp/wrote.files') files = {} for f in open('/tmp/wrote.files'): f = f.strip() if os.path.exists(f): os.system("git blame -w "+f+" > /tmp/wrote.blame") stat = {'total': 0, 'unclean': 0} for line in open('/tmp/wrote.blame'): stat['total'] += 1 if USERNAME in line: stat['unclean'] += 1 files[f] = stat html = open(HTMLFILE, 'w') print >> html, "<html><head><script src='http://www.kryogenix.org/code/browser/sorttable/sorttable.js'></script></head><body><table border=1 cellspacing=0 width=100% class='sortable'>" keys = files.keys() keys.sort(key = lambda a: 1. * files[a]['unclean'] / max(files[a]['total'],0.01)) keys.sort(key = lambda a: files[a]['unclean']) keys.reverse() print >> html, "<tr><td><b>Filename</b></td><td><b>dirty LOC</b></td><td><b>LOC</b></td><td width=300><b>meter</b></td></tr>" for k in keys: v = files[k] print >> html, "<tr><td>%s</td><td>%s</td><td>%s</td><td width=300><meter style='width:300' value='%s' max='%s'> </meter></td></tr>"%(k,v['unclean'], v['total'],v['unclean'], v['total'] ) print >> html, "</body></html>"
apache-2.0
NeuralEnsemble/neuroConstruct
lib/jython/Lib/encodings/cp1254.py
593
13758
""" Python Character Mapping Codec cp1254 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1254.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp1254', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x04' # 0x04 -> END OF TRANSMISSION u'\x05' # 0x05 -> ENQUIRY u'\x06' # 0x06 -> ACKNOWLEDGE u'\x07' # 0x07 -> BELL u'\x08' # 0x08 -> BACKSPACE u'\t' # 0x09 -> HORIZONTAL TABULATION u'\n' # 0x0A -> LINE FEED u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x14' # 0x14 -> DEVICE CONTROL FOUR u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x16 -> SYNCHRONOUS IDLE u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x1a' # 0x1A -> SUBSTITUTE u'\x1b' # 0x1B -> ESCAPE 
u'\x1c' # 0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' # 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL 
LETTER Z u'[' # 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> DELETE u'\u20ac' # 0x80 -> EURO SIGN u'\ufffe' # 0x81 -> UNDEFINED u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS u'\u2020' # 0x86 -> DAGGER u'\u2021' # 0x87 -> DOUBLE DAGGER u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT u'\u2030' # 0x89 -> PER MILLE SIGN u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE u'\ufffe' # 0x8D -> UNDEFINED u'\ufffe' # 0x8E -> UNDEFINED u'\ufffe' # 0x8F -> UNDEFINED u'\ufffe' # 0x90 -> UNDEFINED u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION 
MARK u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK u'\u2022' # 0x95 -> BULLET u'\u2013' # 0x96 -> EN DASH u'\u2014' # 0x97 -> EM DASH u'\u02dc' # 0x98 -> SMALL TILDE u'\u2122' # 0x99 -> TRADE MARK SIGN u'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE u'\ufffe' # 0x9D -> UNDEFINED u'\ufffe' # 0x9E -> UNDEFINED u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS u'\xa0' # 0xA0 -> NO-BREAK SPACE u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK u'\xa2' # 0xA2 -> CENT SIGN u'\xa3' # 0xA3 -> POUND SIGN u'\xa4' # 0xA4 -> CURRENCY SIGN u'\xa5' # 0xA5 -> YEN SIGN u'\xa6' # 0xA6 -> BROKEN BAR u'\xa7' # 0xA7 -> SECTION SIGN u'\xa8' # 0xA8 -> DIAERESIS u'\xa9' # 0xA9 -> COPYRIGHT SIGN u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xac' # 0xAC -> NOT SIGN u'\xad' # 0xAD -> SOFT HYPHEN u'\xae' # 0xAE -> REGISTERED SIGN u'\xaf' # 0xAF -> MACRON u'\xb0' # 0xB0 -> DEGREE SIGN u'\xb1' # 0xB1 -> PLUS-MINUS SIGN u'\xb2' # 0xB2 -> SUPERSCRIPT TWO u'\xb3' # 0xB3 -> SUPERSCRIPT THREE u'\xb4' # 0xB4 -> ACUTE ACCENT u'\xb5' # 0xB5 -> MICRO SIGN u'\xb6' # 0xB6 -> PILCROW SIGN u'\xb7' # 0xB7 -> MIDDLE DOT u'\xb8' # 0xB8 -> CEDILLA u'\xb9' # 0xB9 -> SUPERSCRIPT ONE u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS u'\xbf' # 0xBF -> INVERTED QUESTION MARK u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\xc5' # 0xC5 -> 
LATIN CAPITAL LETTER A WITH RING ABOVE u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS u'\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\xd7' # 0xD7 -> MULTIPLICATION SIGN u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX u'\xeb' # 0xEB -> LATIN 
SMALL LETTER E WITH DIAERESIS u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS u'\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS u'\xf7' # 0xF7 -> DIVISION SIGN u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS u'\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I u'\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
gpl-2.0
dancingdan/tensorflow
tensorflow/examples/tutorials/input_fn/boston.py
76
2920
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """DNNRegressor with custom input_fn for Housing dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import pandas as pd import tensorflow as tf tf.logging.set_verbosity(tf.logging.INFO) COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age", "dis", "tax", "ptratio", "medv"] FEATURES = ["crim", "zn", "indus", "nox", "rm", "age", "dis", "tax", "ptratio"] LABEL = "medv" def get_input_fn(data_set, num_epochs=None, shuffle=True): return tf.estimator.inputs.pandas_input_fn( x=pd.DataFrame({k: data_set[k].values for k in FEATURES}), y=pd.Series(data_set[LABEL].values), num_epochs=num_epochs, shuffle=shuffle) def main(unused_argv): # Load datasets training_set = pd.read_csv("boston_train.csv", skipinitialspace=True, skiprows=1, names=COLUMNS) test_set = pd.read_csv("boston_test.csv", skipinitialspace=True, skiprows=1, names=COLUMNS) # Set of 6 examples for which to predict median house values prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True, skiprows=1, names=COLUMNS) # Feature cols feature_cols = [tf.feature_column.numeric_column(k) for k in FEATURES] # Build 2 layer fully connected DNN with 10, 10 units respectively. 
regressor = tf.estimator.DNNRegressor(feature_columns=feature_cols, hidden_units=[10, 10], model_dir="/tmp/boston_model") # Train regressor.train(input_fn=get_input_fn(training_set), steps=5000) # Evaluate loss over one epoch of test_set. ev = regressor.evaluate( input_fn=get_input_fn(test_set, num_epochs=1, shuffle=False)) loss_score = ev["loss"] print("Loss: {0:f}".format(loss_score)) # Print out predictions over a slice of prediction_set. y = regressor.predict( input_fn=get_input_fn(prediction_set, num_epochs=1, shuffle=False)) # .predict() returns an iterator of dicts; convert to a list and print # predictions predictions = list(p["predictions"] for p in itertools.islice(y, 6)) print("Predictions: {}".format(str(predictions))) if __name__ == "__main__": tf.app.run()
apache-2.0
tvtsoft/odoo8
addons/hr_holidays/hr_holidays.py
3
36891
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. # Copyright (c) 2005-2006 Axelor SARL. (http://www.axelor.com) import calendar import datetime from datetime import date import logging import math import time from operator import attrgetter from dateutil.relativedelta import relativedelta import pytz from openerp.exceptions import UserError, AccessError from openerp import SUPERUSER_ID from openerp import tools from openerp.osv import fields, osv from openerp.tools.translate import _ _logger = logging.getLogger(__name__) class hr_holidays_status(osv.osv): _name = "hr.holidays.status" _description = "Leave Type" def get_days(self, cr, uid, ids, employee_id, context=None): result = dict((id, dict(max_leaves=0, leaves_taken=0, remaining_leaves=0, virtual_remaining_leaves=0)) for id in ids) holiday_ids = self.pool['hr.holidays'].search(cr, uid, [('employee_id', '=', employee_id), ('state', 'in', ['confirm', 'validate1', 'validate']), ('holiday_status_id', 'in', ids) ], context=context) for holiday in self.pool['hr.holidays'].browse(cr, uid, holiday_ids, context=context): status_dict = result[holiday.holiday_status_id.id] if holiday.type == 'add': if holiday.state == 'validate': # note: add only validated allocation even for the virtual # count; otherwise pending then refused allocation allow # the employee to create more leaves than possible status_dict['virtual_remaining_leaves'] += holiday.number_of_days_temp status_dict['max_leaves'] += holiday.number_of_days_temp status_dict['remaining_leaves'] += holiday.number_of_days_temp elif holiday.type == 'remove': # number of days is negative status_dict['virtual_remaining_leaves'] -= holiday.number_of_days_temp if holiday.state == 'validate': status_dict['leaves_taken'] += holiday.number_of_days_temp status_dict['remaining_leaves'] -= holiday.number_of_days_temp return result def _user_left_days(self, cr, uid, ids, name, args, context=None): employee_id = False if context 
and 'employee_id' in context: employee_id = context['employee_id'] else: employee_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context) if employee_ids: employee_id = employee_ids[0] if employee_id: res = self.get_days(cr, uid, ids, employee_id, context=context) else: res = dict((res_id, {'leaves_taken': 0, 'remaining_leaves': 0, 'max_leaves': 0}) for res_id in ids) return res _columns = { 'name': fields.char('Leave Type', size=64, required=True, translate=True), 'categ_id': fields.many2one('calendar.event.type', 'Meeting Type', help='Once a leave is validated, Odoo will create a corresponding meeting of this type in the calendar.'), 'color_name': fields.selection([('red', 'Red'),('blue','Blue'), ('lightgreen', 'Light Green'), ('lightblue','Light Blue'), ('lightyellow', 'Light Yellow'), ('magenta', 'Magenta'),('lightcyan', 'Light Cyan'),('black', 'Black'),('lightpink', 'Light Pink'),('brown', 'Brown'),('violet', 'Violet'),('lightcoral', 'Light Coral'),('lightsalmon', 'Light Salmon'),('lavender', 'Lavender'),('wheat', 'Wheat'),('ivory', 'Ivory')],'Color in Report', required=True, help='This color will be used in the leaves summary located in Reporting\Leaves by Department.'), 'limit': fields.boolean('Allow to Override Limit', help='If you select this check box, the system allows the employees to take more leaves than the available ones for this type and will not take them into account for the "Remaining Legal Leaves" defined on the employee form.'), 'active': fields.boolean('Active', help="If the active field is set to false, it will allow you to hide the leave type without removing it."), 'max_leaves': fields.function(_user_left_days, string='Maximum Allowed', help='This value is given by the sum of all holidays requests with a positive value.', multi='user_left_days'), 'leaves_taken': fields.function(_user_left_days, string='Leaves Already Taken', help='This value is given by the sum of all holidays requests with a negative 
value.', multi='user_left_days'), 'remaining_leaves': fields.function(_user_left_days, string='Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken', multi='user_left_days'), 'virtual_remaining_leaves': fields.function(_user_left_days, string='Virtual Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken - Leaves Waiting Approval', multi='user_left_days'), 'double_validation': fields.boolean('Apply Double Validation', help="When selected, the Allocation/Leave Requests for this type require a second validation to be approved."), } _defaults = { 'color_name': 'red', 'active': True, } def name_get(self, cr, uid, ids, context=None): if context is None: context = {} if not context.get('employee_id'): # leave counts is based on employee_id, would be inaccurate if not based on correct employee return super(hr_holidays_status, self).name_get(cr, uid, ids, context=context) res = [] for record in self.browse(cr, uid, ids, context=context): name = record.name if not record.limit: name = name + (' (%g/%g)' % (record.virtual_remaining_leaves or 0.0, record.max_leaves or 0.0)) res.append((record.id, name)) return res def _search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None): """ Override _search to order the results, according to some employee. The order is the following - limit (limited leaves first, such as Legal Leaves) - virtual remaining leaves (higher the better, so using reverse on sorted) This override is necessary because those fields are not stored and depends on an employee_id given in context. This sort will be done when there is an employee_id in context and that no other order has been given to the method. 
""" if context is None: context = {} ids = super(hr_holidays_status, self)._search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count, access_rights_uid=access_rights_uid) if not count and not order and context.get('employee_id'): leaves = self.browse(cr, uid, ids, context=context) sort_key = lambda l: (not l.limit, l.virtual_remaining_leaves) return map(int, leaves.sorted(key=sort_key, reverse=True)) return ids class hr_holidays(osv.osv): _name = "hr.holidays" _description = "Leave" _order = "type desc, date_from asc" _inherit = ['mail.thread', 'ir.needaction_mixin'] def _employee_get(self, cr, uid, context=None): emp_id = context.get('default_employee_id', False) if emp_id: return emp_id ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context) if ids: return ids[0] return False def _compute_number_of_days(self, cr, uid, ids, name, args, context=None): result = {} for hol in self.browse(cr, uid, ids, context=context): if hol.type=='remove': result[hol.id] = -hol.number_of_days_temp else: result[hol.id] = hol.number_of_days_temp return result def _get_can_reset(self, cr, uid, ids, name, arg, context=None): """User can reset a leave request if it is its own leave request or if he is an Hr Manager. 
""" user = self.pool['res.users'].browse(cr, uid, uid, context=context) group_hr_manager_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_hr_manager')[1] if group_hr_manager_id in [g.id for g in user.groups_id]: return dict.fromkeys(ids, True) result = dict.fromkeys(ids, False) for holiday in self.browse(cr, uid, ids, context=context): if holiday.employee_id and holiday.employee_id.user_id and holiday.employee_id.user_id.id == uid: result[holiday.id] = True return result def _check_date(self, cr, uid, ids, context=None): for holiday in self.browse(cr, uid, ids, context=context): domain = [ ('date_from', '<=', holiday.date_to), ('date_to', '>=', holiday.date_from), ('employee_id', '=', holiday.employee_id.id), ('id', '!=', holiday.id), ('state', 'not in', ['cancel', 'refuse']), ] nholidays = self.search_count(cr, uid, domain, context=context) if nholidays: return False return True _check_holidays = lambda self, cr, uid, ids, context=None: self.check_holidays(cr, uid, ids, context=context) _columns = { 'name': fields.char('Description', size=64), 'state': fields.selection([('draft', 'To Submit'), ('cancel', 'Cancelled'),('confirm', 'To Approve'), ('refuse', 'Refused'), ('validate1', 'Second Approval'), ('validate', 'Approved')], 'Status', readonly=True, track_visibility='onchange', copy=False, help='The status is set to \'To Submit\', when a holiday request is created.\ \nThe status is \'To Approve\', when holiday request is confirmed by user.\ \nThe status is \'Refused\', when holiday request is refused by manager.\ \nThe status is \'Approved\', when holiday request is approved by manager.'), 'payslip_status': fields.boolean(string='Reported in last payslips', help='Green this button when the leave has been taken into account in the payslip.'), 'report_note': fields.text('HR Comments'), 'user_id':fields.related('employee_id', 'user_id', type='many2one', relation='res.users', string='User', store=True), 'date_from': 
fields.datetime('Start Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, select=True, copy=False), 'date_to': fields.datetime('End Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False), 'holiday_status_id': fields.many2one("hr.holidays.status", "Leave Type", required=True,readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}), 'employee_id': fields.many2one('hr.employee', "Employee", select=True, invisible=False, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}), 'manager_id': fields.many2one('hr.employee', 'First Approval', invisible=False, readonly=True, copy=False, help='This area is automatically filled by the user who validate the leave'), 'notes': fields.text('Reasons',readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}), 'number_of_days_temp': fields.float('Allocation', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False), 'number_of_days': fields.function(_compute_number_of_days, string='Number of Days', store=True), 'meeting_id': fields.many2one('calendar.event', 'Meeting'), 'type': fields.selection([('remove','Leave Request'),('add','Allocation Request')], 'Request Type', required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help="Choose 'Leave Request' if someone wants to take an off-day. 
\nChoose 'Allocation Request' if you want to increase the number of leaves available for someone", select=True), 'parent_id': fields.many2one('hr.holidays', 'Parent'), 'linked_request_ids': fields.one2many('hr.holidays', 'parent_id', 'Linked Requests',), 'department_id':fields.related('employee_id', 'department_id', string='Department', type='many2one', relation='hr.department', readonly=True, store=True), 'category_id': fields.many2one('hr.employee.category', "Employee Tag", help='Category of Employee', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}), 'holiday_type': fields.selection([('employee','By Employee'),('category','By Employee Tag')], 'Allocation Mode', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help='By Employee: Allocation/Request for individual Employee, By Employee Tag: Allocation/Request for group of employees in category', required=True), 'manager_id2': fields.many2one('hr.employee', 'Second Approval', readonly=True, copy=False, help='This area is automaticly filled by the user who validate the leave with second level (If Leave type need second validation)'), 'double_validation': fields.related('holiday_status_id', 'double_validation', type='boolean', relation='hr.holidays.status', string='Apply Double Validation'), 'can_reset': fields.function( _get_can_reset, string="Can reset", type='boolean'), } _defaults = { 'employee_id': _employee_get, 'state': 'confirm', 'type': 'remove', 'user_id': lambda obj, cr, uid, context: uid, 'holiday_type': 'employee', 'payslip_status': False, } _constraints = [ (_check_date, 'You can not have 2 leaves that overlaps on same day!', ['date_from', 'date_to']), (_check_holidays, 'The number of remaining leaves is not sufficient for this leave type.\n' 'Please verify also the leaves waiting for validation.', ['state', 'number_of_days_temp']) ] _sql_constraints = [ ('type_value', "CHECK( (holiday_type='employee' AND employee_id IS NOT NULL) 
or (holiday_type='category' AND category_id IS NOT NULL))", "The employee or employee category of this request is missing. Please make sure that your user login is linked to an employee."), ('date_check2', "CHECK ( (type='add') OR (date_from <= date_to))", "The start date must be anterior to the end date."), ('date_check', "CHECK ( number_of_days_temp >= 0 )", "The number of days must be greater than 0."), ] def _create_resource_leave(self, cr, uid, leaves, context=None): '''This method will create entry in resource calendar leave object at the time of holidays validated ''' obj_res_leave = self.pool.get('resource.calendar.leaves') for leave in leaves: vals = { 'name': leave.name, 'date_from': leave.date_from, 'holiday_id': leave.id, 'date_to': leave.date_to, 'resource_id': leave.employee_id.resource_id.id, 'calendar_id': leave.employee_id.resource_id.calendar_id.id } obj_res_leave.create(cr, uid, vals, context=context) return True def _remove_resource_leave(self, cr, uid, ids, context=None): '''This method will create entry in resource calendar leave object at the time of holidays cancel/removed''' obj_res_leave = self.pool.get('resource.calendar.leaves') leave_ids = obj_res_leave.search(cr, uid, [('holiday_id', 'in', ids)], context=context) return obj_res_leave.unlink(cr, uid, leave_ids, context=context) def onchange_type(self, cr, uid, ids, holiday_type, employee_id=False, context=None): result = {} if holiday_type == 'employee' and not employee_id: ids_employee = self.pool.get('hr.employee').search(cr, uid, [('user_id','=', uid)]) if ids_employee: result['value'] = { 'employee_id': ids_employee[0] } elif holiday_type != 'employee': result['value'] = { 'employee_id': False } return result def onchange_employee(self, cr, uid, ids, employee_id): result = {'value': {'department_id': False}} if employee_id: employee = self.pool.get('hr.employee').browse(cr, uid, employee_id) result['value'] = {'department_id': employee.department_id.id} return result # TODO: can be 
improved using resource calendar method def _get_number_of_days(self, date_from, date_to): """Returns a float equals to the timedelta between two dates given as string.""" DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S" from_dt = datetime.datetime.strptime(date_from, DATETIME_FORMAT) to_dt = datetime.datetime.strptime(date_to, DATETIME_FORMAT) timedelta = to_dt - from_dt diff_day = timedelta.days + float(timedelta.seconds) / 86400 return diff_day def unlink(self, cr, uid, ids, context=None): for rec in self.browse(cr, uid, ids, context=context): if rec.state not in ['draft', 'cancel', 'confirm']: raise UserError(_('You cannot delete a leave which is in %s state.') % (rec.state,)) return super(hr_holidays, self).unlink(cr, uid, ids, context) def onchange_date_from(self, cr, uid, ids, date_to, date_from): """ If there are no date set for date_to, automatically set one 8 hours later than the date_from. Also update the number_of_days. """ # date_to has to be greater than date_from if (date_from and date_to) and (date_from > date_to): raise UserError(_('The start date must be anterior to the end date.')) result = {'value': {}} # No date_to set so far: automatically compute one 8 hours later if date_from and not date_to: date_to_with_delta = datetime.datetime.strptime(date_from, tools.DEFAULT_SERVER_DATETIME_FORMAT) + datetime.timedelta(hours=8) result['value']['date_to'] = str(date_to_with_delta) # Compute and update the number of days if (date_to and date_from) and (date_from <= date_to): diff_day = self._get_number_of_days(date_from, date_to) result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1 else: result['value']['number_of_days_temp'] = 0 return result def onchange_date_to(self, cr, uid, ids, date_to, date_from): """ Update the number_of_days. 
""" # date_to has to be greater than date_from if (date_from and date_to) and (date_from > date_to): raise UserError(_('The start date must be anterior to the end date.')) result = {'value': {}} # Compute and update the number of days if (date_to and date_from) and (date_from <= date_to): diff_day = self._get_number_of_days(date_from, date_to) result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1 else: result['value']['number_of_days_temp'] = 0 return result def add_follower(self, cr, uid, ids, employee_id, context=None): employee = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context) if employee and employee.user_id: self.message_subscribe_users(cr, uid, ids, user_ids=[employee.user_id.id], context=context) def create(self, cr, uid, values, context=None): """ Override to avoid automatic logging of creation """ if context is None: context = {} employee_id = values.get('employee_id', False) context = dict(context, mail_create_nolog=True) if values.get('state') and values['state'] not in ['draft', 'confirm', 'cancel'] and not self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'): raise AccessError(_('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % values.get('state')) hr_holiday_id = super(hr_holidays, self).create(cr, uid, values, context=context) self.add_follower(cr, uid, [hr_holiday_id], employee_id, context=context) return hr_holiday_id def write(self, cr, uid, ids, vals, context=None): employee_id = vals.get('employee_id', False) if vals.get('state') and vals['state'] not in ['draft', 'confirm', 'cancel'] and not self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'): raise AccessError(_('You cannot set a leave request as \'%s\'. 
Contact a human resource manager.') % vals.get('state')) hr_holiday_id = super(hr_holidays, self).write(cr, uid, ids, vals, context=context) self.add_follower(cr, uid, ids, employee_id, context=context) return hr_holiday_id def holidays_reset(self, cr, uid, ids, context=None): self.write(cr, uid, ids, { 'state': 'draft', 'manager_id': False, 'manager_id2': False, }) to_unlink = [] for record in self.browse(cr, uid, ids, context=context): for record2 in record.linked_request_ids: self.holidays_reset(cr, uid, [record2.id], context=context) to_unlink.append(record2.id) if to_unlink: self.unlink(cr, uid, to_unlink, context=context) return True def holidays_first_validate(self, cr, uid, ids, context=None): obj_emp = self.pool.get('hr.employee') ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)]) manager = ids2 and ids2[0] or False return self.write(cr, uid, ids, {'state': 'validate1', 'manager_id': manager}, context=context) def holidays_validate(self, cr, uid, ids, context=None): obj_emp = self.pool.get('hr.employee') ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)]) manager = ids2 and ids2[0] or False self.write(cr, uid, ids, {'state': 'validate'}, context=context) data_holiday = self.browse(cr, uid, ids) for record in data_holiday: if record.double_validation: self.write(cr, uid, [record.id], {'manager_id2': manager}) else: self.write(cr, uid, [record.id], {'manager_id': manager}) if record.holiday_type == 'employee' and record.type == 'remove': meeting_obj = self.pool.get('calendar.event') meeting_vals = { 'name': record.name or _('Leave Request'), 'categ_ids': record.holiday_status_id.categ_id and [(6,0,[record.holiday_status_id.categ_id.id])] or [], 'duration': record.number_of_days_temp * 8, 'description': record.notes, 'user_id': record.user_id.id, 'start': record.date_from, 'stop': record.date_to, 'allday': False, 'state': 'open', # to block that meeting date in the calendar 'class': 'confidential' } #Add the partner_id (if exist) as an attendee if 
record.user_id and record.user_id.partner_id: meeting_vals['partner_ids'] = [(4,record.user_id.partner_id.id)] ctx_no_email = dict(context or {}, no_email=True) meeting_id = meeting_obj.create(cr, uid, meeting_vals, context=ctx_no_email) self._create_resource_leave(cr, uid, [record], context=context) self.write(cr, uid, ids, {'meeting_id': meeting_id}) elif record.holiday_type == 'category': emp_ids = obj_emp.search(cr, uid, [('category_ids', 'child_of', [record.category_id.id])]) leave_ids = [] for emp in obj_emp.browse(cr, uid, emp_ids): vals = { 'name': record.name, 'type': record.type, 'holiday_type': 'employee', 'holiday_status_id': record.holiday_status_id.id, 'date_from': record.date_from, 'date_to': record.date_to, 'notes': record.notes, 'number_of_days_temp': record.number_of_days_temp, 'parent_id': record.id, 'employee_id': emp.id } leave_ids.append(self.create(cr, uid, vals, context=None)) for leave_id in leave_ids: # TODO is it necessary to interleave the calls? for sig in ('confirm', 'validate', 'second_validate'): self.signal_workflow(cr, uid, [leave_id], sig) return True def holidays_confirm(self, cr, uid, ids, context=None): for record in self.browse(cr, uid, ids, context=context): if record.employee_id and record.employee_id.parent_id and record.employee_id.parent_id.user_id: self.message_subscribe_users(cr, uid, [record.id], user_ids=[record.employee_id.parent_id.user_id.id], context=context) return self.write(cr, uid, ids, {'state': 'confirm'}) def holidays_refuse(self, cr, uid, ids, context=None): obj_emp = self.pool.get('hr.employee') ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)]) manager = ids2 and ids2[0] or False for holiday in self.browse(cr, uid, ids, context=context): if holiday.state == 'validate1': self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id': manager}) else: self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id2': manager}) self.holidays_cancel(cr, uid, ids, context=context) return True def 
holidays_cancel(self, cr, uid, ids, context=None): for record in self.browse(cr, uid, ids): # Delete the meeting if record.meeting_id: record.meeting_id.unlink() # If a category that created several holidays, cancel all related self.signal_workflow(cr, uid, map(attrgetter('id'), record.linked_request_ids or []), 'refuse') self._remove_resource_leave(cr, uid, ids, context=context) return True def check_holidays(self, cr, uid, ids, context=None): for record in self.browse(cr, uid, ids, context=context): if record.holiday_type != 'employee' or record.type != 'remove' or not record.employee_id or record.holiday_status_id.limit: continue leave_days = self.pool.get('hr.holidays.status').get_days(cr, uid, [record.holiday_status_id.id], record.employee_id.id, context=context)[record.holiday_status_id.id] if leave_days['remaining_leaves'] < 0 or leave_days['virtual_remaining_leaves'] < 0: return False return True def toggle_payslip_status(self, cr, uid, ids, context=None): ids_to_set_true = self.search(cr, uid, [('id', 'in', ids), ('payslip_status', '=', False)], context=context) ids_to_set_false = list(set(ids) - set(ids_to_set_true)) return self.write(cr, uid, ids_to_set_true, {'payslip_status': True}, context=context) and self.write(cr, uid, ids_to_set_false, {'payslip_status': False}, context=context) def _track_subtype(self, cr, uid, ids, init_values, context=None): record = self.browse(cr, uid, ids[0], context=context) if 'state' in init_values and record.state == 'validate': return 'hr_holidays.mt_holidays_approved' elif 'state' in init_values and record.state == 'validate1': return 'hr_holidays.mt_holidays_first_validated' elif 'state' in init_values and record.state == 'confirm': return 'hr_holidays.mt_holidays_confirmed' elif 'state' in init_values and record.state == 'refuse': return 'hr_holidays.mt_holidays_refused' return super(hr_holidays, self)._track_subtype(cr, uid, ids, init_values, context=context) class resource_calendar_leaves(osv.osv): _inherit = 
"resource.calendar.leaves" _description = "Leave Detail" _columns = { 'holiday_id': fields.many2one("hr.holidays", "Leave Request"), } class hr_employee(osv.Model): _inherit = "hr.employee" def _set_remaining_days(self, cr, uid, empl_id, name, value, arg, context=None): if value: employee = self.browse(cr, uid, empl_id, context=context) diff = value - employee.remaining_leaves type_obj = self.pool.get('hr.holidays.status') holiday_obj = self.pool.get('hr.holidays') # Find for holidays status status_ids = type_obj.search(cr, uid, [('limit', '=', False)], context=context) if len(status_ids) != 1 : raise osv.except_osv(_('Warning!'),_("The feature behind the field 'Remaining Legal Leaves' can only be used when there is only one leave type with the option 'Allow to Override Limit' unchecked. (%s Found). Otherwise, the update is ambiguous as we cannot decide on which leave type the update has to be done. \nYou may prefer to use the classic menus 'Leave Requests' and 'Allocation Requests' located in 'Human Resources \ Leaves' to manage the leave days of the employees if the configuration does not allow to use this field.") % (len(status_ids))) status_id = status_ids and status_ids[0] or False if not status_id: return False if diff > 0: leave_id = holiday_obj.create(cr, uid, {'name': _('Allocation for %s') % employee.name, 'employee_id': employee.id, 'holiday_status_id': status_id, 'type': 'add', 'holiday_type': 'employee', 'number_of_days_temp': diff}, context=context) elif diff < 0: raise osv.except_osv(_('Warning!'), _('You cannot reduce validated allocation requests')) else: return False for sig in ('confirm', 'validate', 'second_validate'): holiday_obj.signal_workflow(cr, uid, [leave_id], sig) return True return False def _get_remaining_days(self, cr, uid, ids, name, args, context=None): cr.execute("""SELECT sum(h.number_of_days) as days, h.employee_id from hr_holidays h join hr_holidays_status s on (s.id=h.holiday_status_id) where h.state='validate' and 
s.limit=False and h.employee_id in %s group by h.employee_id""", (tuple(ids),)) res = cr.dictfetchall() remaining = {} for r in res: remaining[r['employee_id']] = r['days'] for employee_id in ids: if not remaining.get(employee_id): remaining[employee_id] = 0.0 return remaining def _get_leave_status(self, cr, uid, ids, name, args, context=None): holidays_obj = self.pool.get('hr.holidays') holidays_id = holidays_obj.search(cr, uid, [('employee_id', 'in', ids), ('date_from','<=',time.strftime('%Y-%m-%d %H:%M:%S')), ('date_to','>=',time.strftime('%Y-%m-%d 23:59:59')),('type','=','remove'),('state','not in',('cancel','refuse'))], context=context) result = {} for id in ids: result[id] = { 'current_leave_state': False, 'current_leave_id': False, 'leave_date_from':False, 'leave_date_to':False, } for holiday in self.pool.get('hr.holidays').browse(cr, uid, holidays_id, context=context): result[holiday.employee_id.id]['leave_date_from'] = holiday.date_from result[holiday.employee_id.id]['leave_date_to'] = holiday.date_to result[holiday.employee_id.id]['current_leave_state'] = holiday.state result[holiday.employee_id.id]['current_leave_id'] = holiday.holiday_status_id.id return result def _leaves_count(self, cr, uid, ids, field_name, arg, context=None): res = {} Holidays = self.pool['hr.holidays'] date_begin = date.today().replace(day=1) date_end = date_begin.replace(day=calendar.monthrange(date_begin.year, date_begin.month)[1]) for employee_id in ids: leaves = Holidays.search_count(cr, uid, [('employee_id', '=', employee_id), ('type', '=', 'remove')], context=context) approved_leaves = Holidays.search_count(cr, uid, [('employee_id', '=', employee_id), ('type', '=', 'remove'), ('date_from', '>=', date_begin.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)), ('date_from', '<=', date_end.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)), ('state', '=', 'validate'), ('payslip_status', '=', False)], context=context) res[employee_id] = {'leaves_count': leaves, 'approved_leaves_count': 
approved_leaves} return res def _absent_employee(self, cr, uid, ids, field_name, arg, context=None): today_date = datetime.datetime.utcnow().date() today_start = today_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT) # get the midnight of the current utc day today_end = (today_date + relativedelta(hours=23, minutes=59, seconds=59)).strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT) data = self.pool['hr.holidays'].read_group(cr, uid, [('employee_id', 'in', ids), ('state', 'not in', ['cancel', 'refuse']), ('date_from', '<=', today_end), ('date_to', '>=', today_start), ('type', '=', 'remove')], ['employee_id'], ['employee_id'], context=context) result = dict.fromkeys(ids, False) for d in data: if d['employee_id_count'] >= 1: result[d['employee_id'][0]] = True return result def _search_absent_employee(self, cr, uid, obj, name, args, context=None): today_date = datetime.datetime.utcnow().date() today_start = today_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT) # get the midnight of the current utc day today_end = (today_date + relativedelta(hours=23, minutes=59, seconds=59)).strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT) holiday_ids = self.pool['hr.holidays'].search_read(cr, uid, [ ('state', 'not in', ['cancel', 'refuse']), ('date_from', '<=', today_end), ('date_to', '>=', today_start), ('type', '=', 'remove')], ['employee_id'], context=context) absent_employee_ids = [holiday['employee_id'][0] for holiday in holiday_ids if holiday['employee_id']] return [('id', 'in', absent_employee_ids)] _columns = { 'remaining_leaves': fields.function(_get_remaining_days, string='Remaining Legal Leaves', fnct_inv=_set_remaining_days, type="float", help='Total number of legal leaves allocated to this employee, change this value to create allocation/leave request. 
Total based on all the leave types without overriding limit.'), 'current_leave_state': fields.function( _get_leave_status, multi="leave_status", string="Current Leave Status", type="selection", selection=[('draft', 'New'), ('confirm', 'Waiting Approval'), ('refuse', 'Refused'), ('validate1', 'Waiting Second Approval'), ('validate', 'Approved'), ('cancel', 'Cancelled')]), 'current_leave_id': fields.function(_get_leave_status, multi="leave_status", string="Current Leave Type", type='many2one', relation='hr.holidays.status'), 'leave_date_from': fields.function(_get_leave_status, multi='leave_status', type='date', string='From Date'), 'leave_date_to': fields.function(_get_leave_status, multi='leave_status', type='date', string='To Date'), 'leaves_count': fields.function(_leaves_count, multi='_leaves_count', type='integer', string='Number of Leaves (current month)'), 'approved_leaves_count': fields.function(_leaves_count, multi='_leaves_count', type='integer', string='Approved Leaves not in Payslip', help="These leaves are approved but not taken into account for payslip"), 'is_absent_totay': fields.function(_absent_employee, fnct_search=_search_absent_employee, type="boolean", string="Absent Today", default=False) }
agpl-3.0
nikhilraog/boto
tests/integration/sdb/test_connection.py
114
4320
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2010, Eucalyptus Systems, Inc. # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """ Some unit tests for the SDBConnection """ import unittest import time from boto.sdb.connection import SDBConnection from boto.exception import SDBResponseError class SDBConnectionTest (unittest.TestCase): sdb = True def test_1_basic(self): print('--- running SDBConnection tests ---') c = SDBConnection() rs = c.get_all_domains() num_domains = len(rs) # try illegal name try: domain = c.create_domain('bad:domain:name') except SDBResponseError: pass # now create one that should work and should be unique (i.e. 
a new one) domain_name = 'test%d' % int(time.time()) domain = c.create_domain(domain_name) rs = c.get_all_domains() assert len(rs) == num_domains + 1 # now let's a couple of items and attributes item_1 = 'item1' same_value = 'same_value' attrs_1 = {'name1': same_value, 'name2': 'diff_value_1'} domain.put_attributes(item_1, attrs_1) item_2 = 'item2' attrs_2 = {'name1': same_value, 'name2': 'diff_value_2'} domain.put_attributes(item_2, attrs_2) # try to get the attributes and see if they match item = domain.get_attributes(item_1, consistent_read=True) assert len(item.keys()) == len(attrs_1.keys()) assert item['name1'] == attrs_1['name1'] assert item['name2'] == attrs_1['name2'] # try a search or two query = 'select * from %s where name1="%s"' % (domain_name, same_value) rs = domain.select(query, consistent_read=True) n = 0 for item in rs: n += 1 assert n == 2 query = 'select * from %s where name2="diff_value_2"' % domain_name rs = domain.select(query, consistent_read=True) n = 0 for item in rs: n += 1 assert n == 1 # delete all attributes associated with item_1 stat = domain.delete_attributes(item_1) assert stat # now try a batch put operation on the domain item3 = {'name3_1': 'value3_1', 'name3_2': 'value3_2', 'name3_3': ['value3_3_1', 'value3_3_2']} item4 = {'name4_1': 'value4_1', 'name4_2': ['value4_2_1', 'value4_2_2'], 'name4_3': 'value4_3'} items = {'item3': item3, 'item4': item4} domain.batch_put_attributes(items) item = domain.get_attributes('item3', consistent_read=True) assert item['name3_2'] == 'value3_2' # now try a batch delete operation (variation #1) items = {'item3': item3} stat = domain.batch_delete_attributes(items) item = domain.get_attributes('item3', consistent_read=True) assert not item # now try a batch delete operation (variation #2) stat = domain.batch_delete_attributes({'item4': None}) item = domain.get_attributes('item4', consistent_read=True) assert not item # now delete the domain stat = c.delete_domain(domain) assert stat print('--- tests 
completed ---')
mit
NYUCCL/psiTurk
psiturk/experiment.py
1
27033
# -*- coding: utf-8 -*- """ This module provides the backend Flask server used by psiTurk. """ from __future__ import generator_stop import os import sys import datetime import logging from random import choice import user_agents import requests import re import json from jinja2 import TemplateNotFound from collections import Counter # Setup flask from flask import Flask, render_template, render_template_string, request, \ jsonify, send_from_directory # Setup database from .db import db_session, init_db from .models import Participant from sqlalchemy import or_, exc from .psiturk_statuses import * from .psiturk_config import PsiturkConfig from .experiment_errors import ExperimentError, ExperimentApiError from .user_utils import nocache # Setup config CONFIG = PsiturkConfig() CONFIG.load_config() LOG_LEVELS = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL] LOG_LEVEL = LOG_LEVELS[CONFIG.getint('Server Parameters', 'loglevel')] logfile = CONFIG.get("Server Parameters", "logfile") if logfile != '-': file_path = os.path.join(os.getcwd(), logfile) logging.basicConfig(filename=file_path, format='%(asctime)s %(message)s', level=LOG_LEVEL) # Let's start # =========== app = Flask("Experiment_Server") app.logger.setLevel(LOG_LEVEL) # Set cache timeout to 10 seconds for static files app.config.update(SEND_FILE_MAX_AGE_DEFAULT=10) app.secret_key = CONFIG.get('Server Parameters', 'secret_key') def check_templates_exist(): # this checks for templates that are required if you are hosting your own ad. try: try_these = ['thanks-mturksubmit.html', 'closepopup.html'] [app.jinja_env.get_template(try_this) for try_this in try_these] except TemplateNotFound as e: raise RuntimeError(( f"Missing one of the following templates: {', '.join(try_these)}." f"Copy these over from a freshly-created psiturk example experiment." 
f"{type(e).__name__, str(e)}" )) check_templates_exist() # Serving warm, fresh, & sweet custom, user-provided routes # ========================================================== try: sys.path.append(os.getcwd()) from custom import custom_code except ModuleNotFoundError as e: app.logger.info("Hmm... it seems no custom code (custom.py) associated \ with this project.") except ImportError as e: app.logger.error("There is custom code (custom.py) associated with this \ project but it doesn't import cleanly. Raising exception,") raise else: app.register_blueprint(custom_code) try: # noinspection PyUnresolvedReferences from custom import init_app as custom_init_app except ImportError as e: pass else: custom_init_app(app) # scheduler from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore from pytz import utc from .db import engine JOBS_TABLENAME = CONFIG.get('Database Parameters', 'jobs_table_name') jobstores = { 'default': SQLAlchemyJobStore(engine=engine, tablename=JOBS_TABLENAME) } if 'gunicorn' in os.environ.get('SERVER_SOFTWARE', ''): from apscheduler.schedulers.gevent import GeventScheduler as Scheduler else: from apscheduler.schedulers.background import BackgroundScheduler as Scheduler logging.getLogger('apscheduler').setLevel(logging.DEBUG) scheduler = Scheduler(jobstores=jobstores, timezone=utc) app.apscheduler = scheduler scheduler.app = app if CONFIG.getboolean('Server Parameters', 'do_scheduler'): app.logger.info("Scheduler starting!") scheduler.start() else: app.logger.info("Starting scheduler in 'paused' mode -- it will not run any tasks, but it can be used to create, modify, or delete tasks.") scheduler.start(paused=True) # # Dashboard # if CONFIG.getboolean('Server Parameters', 'enable_dashboard'): from .dashboard import dashboard, init_app as dashboard_init_app # management dashboard app.register_blueprint(dashboard) dashboard_init_app(app) from .api import api_blueprint app.register_blueprint(api_blueprint) init_db() # Read psiturk.js file into 
memory PSITURK_JS_FILE = os.path.join(os.path.dirname(__file__), "psiturk_js/psiturk.js") app.logger.info(PSITURK_JS_FILE) if os.path.exists(PSITURK_JS_FILE): PSITURK_JS_CODE = open(PSITURK_JS_FILE).read() else: PSITURK_JS_CODE = "alert('psiturk.js file not found!');" @app.errorhandler(ExperimentError) def handle_exp_error(exception): """Handle errors by sending an error page.""" app.logger.error( "%s (%s) %s", exception.value, exception.errornum, str(dict(request.args))) return exception.error_page(request, CONFIG.get('Task Parameters', 'contact_email_on_error')) @app.errorhandler(ExperimentApiError) def handle_experiment_api_error(error): # for use with API errors response = jsonify(error.to_dict()) response.status_code = error.status_code app.logger.error(error.message) return response @app.teardown_request def shutdown_session(_=None): """ Shut down session route """ db_session.remove() # Experiment counterbalancing code # ================================ def get_random_condcount(mode): """ HITs can be in one of three states: - jobs that are finished - jobs that are started but not finished - jobs that are never going to finish (user decided not to do it) Our count should be based on the first two, so we count any tasks finished or any tasks not finished that were started in the last cutoff_time minutes, as specified in the cutoff_time variable in the config file. 
Returns a tuple: (cond, condition) """ cutofftime = datetime.timedelta(minutes=-CONFIG.getint('Task Parameters', 'cutoff_time')) starttime = datetime.datetime.now(datetime.timezone.utc) + cutofftime try: conditions = json.load( open(os.path.join(app.root_path, 'conditions.json'))) numconds = len(list(conditions.keys())) numcounts = 1 except IOError: numconds = CONFIG.getint('Task Parameters', 'num_conds') numcounts = CONFIG.getint('Task Parameters', 'num_counters') participants = Participant.query.\ filter(Participant.codeversion == CONFIG.get('Task Parameters', 'experiment_code_version')).\ filter(Participant.mode == mode).\ filter(or_(Participant.status == COMPLETED, Participant.status == CREDITED, Participant.status == SUBMITTED, Participant.status == BONUSED, Participant.beginhit > starttime)).all() counts = Counter() for cond in range(numconds): for counter in range(numcounts): counts[(cond, counter)] = 0 for participant in participants: condcount = (participant.cond, participant.counterbalance) if condcount in counts: counts[condcount] += 1 mincount = min(counts.values()) minima = [hsh for hsh, count in counts.items() if count == mincount] chosen = choice(minima) app.logger.info("given %(a)s chose %(b)s" % {'a': counts, 'b': chosen}) return chosen try: from custom import custom_get_condition as get_condition except (ModuleNotFoundError, ImportError): get_condition = get_random_condcount # Routes # ====== @app.route('/') @nocache def index(): """ Index route """ return render_template('default.html') @app.route('/favicon.ico') def favicon(): """ Serve favicon """ return app.send_static_file('favicon.ico') @app.route('/static/js/psiturk.js') def psiturk_js(): """ psiTurk js route """ return render_template_string(PSITURK_JS_CODE) @app.route('/check_worker_status', methods=['GET']) def check_worker_status(): """ Check worker status route """ if 'workerId' not in request.args: resp = {"status": "bad request"} return jsonify(**resp) else: worker_id = 
request.args['workerId'] assignment_id = request.args['assignmentId'] allow_repeats = CONFIG.getboolean('Task Parameters', 'allow_repeats') if allow_repeats: # if you allow repeats focus on current worker/assignment combo try: part = Participant.query.\ filter(Participant.workerid == worker_id).\ filter(Participant.assignmentid == assignment_id).one() status = part.status except exc.SQLAlchemyError: status = NOT_ACCEPTED else: # if you disallow repeats search for highest status of anything by this worker try: matches = Participant.query.\ filter(Participant.workerid == worker_id).all() numrecs = len(matches) if numrecs == 0: # this should be caught by exception, but just to be safe status = NOT_ACCEPTED else: status = max([record.status for record in matches]) except exc.SQLAlchemyError: status = NOT_ACCEPTED resp = {"status": status} return jsonify(**resp) @app.route('/ad', methods=['GET']) @app.route('/pub', methods=['GET']) @nocache def advertisement(): """ This is the url we give for the ad for our 'external question'. The ad has to display two different things: This page will be called from within mechanical turk, with url arguments hitId, assignmentId, and workerId. If the worker has not yet accepted the hit: These arguments will have null values, we should just show an ad for the experiment. If the worker has accepted the hit: These arguments will have appropriate values and we should enter the person in the database and provide a link to the experiment popup. 
""" user_agent_string = request.user_agent.string user_agent_obj = user_agents.parse(user_agent_string) browser_ok = True browser_exclude_rule = CONFIG.get('Task Parameters', 'browser_exclude_rule') for rule in browser_exclude_rule.split(','): myrule = rule.strip() if myrule in ["mobile", "tablet", "touchcapable", "pc", "bot"]: if (myrule == "mobile" and user_agent_obj.is_mobile) or\ (myrule == "tablet" and user_agent_obj.is_tablet) or\ (myrule == "touchcapable" and user_agent_obj.is_touch_capable) or\ (myrule == "pc" and user_agent_obj.is_pc) or\ (myrule == "bot" and user_agent_obj.is_bot): browser_ok = False elif myrule == "Safari" or myrule == "safari": if "Chrome" in user_agent_string and "Safari" in user_agent_string: pass elif "Safari" in user_agent_string: browser_ok = False elif myrule in user_agent_string: browser_ok = False if not browser_ok: # Handler for IE users if IE is not supported. raise ExperimentError('browser_type_not_allowed') if not ('hitId' in request.args and 'assignmentId' in request.args): raise ExperimentError('hit_assign_worker_id_not_set_in_mturk') hit_id = request.args['hitId'] assignment_id = request.args['assignmentId'] mode = request.args['mode'] if hit_id[:5] == "debug": debug_mode = True else: debug_mode = False already_in_db = False if 'workerId' in request.args: worker_id = request.args['workerId'] # First check if this workerId has completed the task before (v1). 
nrecords = Participant.query.\ filter(Participant.assignmentid != assignment_id).\ filter(Participant.workerid == worker_id).\ count() if nrecords > 0: # Already completed task already_in_db = True else: # If worker has not accepted the hit worker_id = None try: part = Participant.query.\ filter(Participant.hitid == hit_id).\ filter(Participant.assignmentid == assignment_id).\ filter(Participant.workerid == worker_id).\ one() status = part.status except exc.SQLAlchemyError: status = None allow_repeats = CONFIG.getboolean('Task Parameters', 'allow_repeats') if (status == STARTED or status == QUITEARLY) and not debug_mode: # Once participants have finished the instructions, we do not allow # them to start the task again. raise ExperimentError('already_started_exp_mturk') elif status == COMPLETED or (status == SUBMITTED and not already_in_db): # 'or status == SUBMITTED' because we suspect that sometimes the post # to mturk fails after we've set status to SUBMITTED, so really they # have not successfully submitted. This gives another chance for the # submit to work. # They've finished the experiment but haven't successfully submitted the HIT # yet. return render_template( 'thanks-mturksubmit.html', using_sandbox=(mode == "sandbox"), hitid=hit_id, assignmentid=assignment_id, workerid=worker_id ) elif already_in_db and not (debug_mode or allow_repeats): raise ExperimentError('already_did_exp_hit') elif status == ALLOCATED or not status or debug_mode: # Participant has not yet agreed to the consent. They might not # even have accepted the HIT. with open('templates/ad.html', 'r') as temp_file: ad_string = temp_file.read() ad_string = insert_mode(ad_string) return render_template_string( ad_string, mode=mode, hitid=hit_id, assignmentid=assignment_id, workerid=worker_id ) else: raise ExperimentError('status_incorrectly_set') @app.route('/consent', methods=['GET']) @nocache def give_consent(): """ Serves up the consent in the popup window. 
""" if not ('hitId' in request.args and 'assignmentId' in request.args and 'workerId' in request.args): raise ExperimentError('hit_assign_worker_id_not_set_in_consent') hit_id = request.args['hitId'] assignment_id = request.args['assignmentId'] worker_id = request.args['workerId'] mode = request.args['mode'] with open('templates/consent.html', 'r') as temp_file: consent_string = temp_file.read() consent_string = insert_mode(consent_string) return render_template_string( consent_string, mode=mode, hitid=hit_id, assignmentid=assignment_id, workerid=worker_id ) @app.route('/exp', methods=['GET']) @nocache def start_exp(): """ Serves up the experiment applet. """ if not (('hitId' in request.args) and ('assignmentId' in request.args) and ('workerId' in request.args) and ('mode' in request.args)): raise ExperimentError('hit_assign_worker_id_not_set_in_exp') hit_id = request.args['hitId'] assignment_id = request.args['assignmentId'] worker_id = request.args['workerId'] mode = request.args['mode'] app.logger.info("Accessing /exp: %(h)s %(a)s %(w)s " % { "h": hit_id, "a": assignment_id, "w": worker_id }) if hit_id[:5] == "debug": debug_mode = True else: debug_mode = False # Check first to see if this hitId or assignmentId exists. 
If so, check to # see if inExp is set allow_repeats = CONFIG.getboolean('Task Parameters', 'allow_repeats') if allow_repeats: matches = Participant.query.\ filter(Participant.workerid == worker_id).\ filter(Participant.assignmentid == assignment_id).\ all() else: matches = Participant.query.\ filter(Participant.workerid == worker_id).\ all() numrecs = len(matches) if numrecs == 0: # Choose condition and counterbalance subj_cond, subj_counter = get_condition(mode) worker_ip = "UNKNOWN" if not request.remote_addr else \ request.remote_addr browser = "UNKNOWN" if not request.user_agent.browser else \ request.user_agent.browser platform = "UNKNOWN" if not request.user_agent.platform else \ request.user_agent.platform language = "UNKNOWN" if not request.accept_languages else \ request.accept_languages.best # Set condition here and insert into database. participant_attributes = dict( assignmentid=assignment_id, workerid=worker_id, hitid=hit_id, cond=subj_cond, counterbalance=subj_counter, ipaddress=worker_ip, browser=browser, platform=platform, language=language, mode=mode ) part = Participant(**participant_attributes) db_session.add(part) db_session.commit() else: # A couple possible problems here: # 1: They've already done an assignment, then we should tell them they # can't do another one # 2: They've already worked on this assignment, and got too far to # start over. # 3: They're in the database twice for the same assignment, that should # never happen. # 4: They're returning and all is well. 
nrecords = 0 for record in matches: other_assignment = False if record.assignmentid != assignment_id: other_assignment = True else: nrecords += 1 if nrecords <= 1 and not other_assignment: part = matches[0] # In experiment (or later) can't restart at this point if part.status >= STARTED and not debug_mode: raise ExperimentError('already_started_exp') else: if nrecords > 1: app.logger.error("Error, hit/assignment appears in database \ more than once (serious problem)") raise ExperimentError( 'hit_assign_appears_in_database_more_than_once' ) if other_assignment: raise ExperimentError('already_did_exp_hit') ad_server_location = '/complete' return render_template( 'exp.html', uniqueId=part.uniqueid, condition=part.cond, counterbalance=part.counterbalance, adServerLoc=ad_server_location, mode=mode, contact_address=CONFIG.get( 'Task Parameters', 'contact_email_on_error'), codeversion=CONFIG.get( 'Task Parameters', 'experiment_code_version') ) @app.route('/inexp', methods=['POST']) def enterexp(): """ AJAX listener that listens for a signal from the user's script when they leave the instructions and enter the real experiment. After the server receives this signal, it will no longer allow them to re-access the experiment applet (meaning they can't do part of the experiment and referesh to start over). """ app.logger.info("Accessing /inexp") if 'uniqueId' not in request.form: raise ExperimentError('improper_inputs') unique_id = request.form['uniqueId'] try: user = Participant.query.\ filter(Participant.uniqueid == unique_id).one() user.status = STARTED user.beginexp = datetime.datetime.now(datetime.timezone.utc) db_session.add(user) db_session.commit() resp = {"status": "success"} except exc.SQLAlchemyError: app.logger.error("DB error: Unique user not found.") resp = {"status": "error, uniqueId not found"} return jsonify(**resp) # TODD SAYS: This the only route in the whole thing that uses <id> like this # where everything else uses POST! 
# This could be confusing but is forced
# somewhat by Backbone? Take heed!


@app.route('/sync/<uid>', methods=['GET'])
def load(uid=None):
    """
    Load experiment data, which should be a JSON object and will be stored
    after converting to string.
    """
    app.logger.info("GET /sync route with id: %s" % uid)
    try:
        user = Participant.query.\
            filter(Participant.uniqueid == uid).\
            one()
    except exc.SQLAlchemyError:
        # NOTE(review): on a DB miss this handler logs and falls through
        # without returning a response — confirm this is the intended
        # behavior for unknown ids.
        app.logger.error("DB error: Unique user not found.")
    else:
        try:
            resp = json.loads(user.datastring)
        except (ValueError, TypeError, json.JSONDecodeError):
            # No data stored yet (or unparseable): hand back the
            # participant's bookkeeping fields instead.
            resp = {
                "condition": user.cond,
                "counterbalance": user.counterbalance,
                "assignmentId": user.assignmentid,
                "workerId": user.workerid,
                "hitId": user.hitid,
                "bonus": user.bonus
            }
        return jsonify(**resp)


@app.route('/sync/<uid>', methods=['PUT'])
def update(uid=None):
    """
    Save experiment data, which should be a JSON object and will be stored
    after converting to string.
    """
    app.logger.info("PUT /sync route with id: %s" % uid)
    try:
        user = Participant.query.\
            filter(Participant.uniqueid == uid).\
            one()
    except exc.SQLAlchemyError:
        raise ExperimentApiError("DB error: Unique user not found.")

    user.datastring = json.dumps(request.json)
    db_session.add(user)
    db_session.commit()

    # Round-trip the stored string to verify it is still valid JSON.
    try:
        data = json.loads(user.datastring)
    except Exception as e:
        raise ExperimentApiError('failed to load json datastring back from database as object! Error was {}: {}'.format(type(e), str(e)))

    trial = data.get("currenttrial", None)
    app.logger.info("saved data for %s (current trial: %s)", uid, trial)
    resp = {"status": "user data saved"}
    return jsonify(**resp)


@app.route('/quitter', methods=['POST'])
def quitter():
    """
    Mark quitter as such.
    """
    unique_id = request.form['uniqueId']
    # Debug participants are identified by a "debug" id prefix.
    debug_mode = unique_id[:5] == "debug"

    if debug_mode:
        resp = {"status": "didn't mark as quitter since this is debugging"}
        return jsonify(**resp)
    else:
        try:
            unique_id = request.form['uniqueId']
            app.logger.info("Marking quitter %s" % unique_id)
            user = Participant.query.\
                filter(Participant.uniqueid == unique_id).\
                one()
            user.status = QUITEARLY
            db_session.add(user)
            db_session.commit()
        except exc.SQLAlchemyError:
            raise ExperimentError('tried_to_quit')
        else:
            resp = {"status": "marked as quitter"}
            return jsonify(**resp)


# Note: This route should only used when debugging
# or when not using the psiturk adserver
@app.route('/complete', methods=['GET'])
@nocache
def debug_complete():
    """ Debugging route for complete. """
    if 'uniqueId' not in request.args:
        raise ExperimentError('improper_inputs')
    else:
        unique_id = request.args['uniqueId']
        mode = request.args['mode']
        try:
            user = Participant.query.\
                filter(Participant.uniqueid == unique_id).one()
            user.status = COMPLETED
            user.endhit = datetime.datetime.now(datetime.timezone.utc)
            db_session.add(user)
            db_session.commit()
        except exc.SQLAlchemyError:
            raise ExperimentError('error_setting_worker_complete')
        else:
            # send them back to mturk.
            if mode == 'sandbox' or mode == 'live':
                return render_template('closepopup.html')
            else:
                allow_repeats = CONFIG.getboolean('Task Parameters',
                                                  'allow_repeats')
                return render_template('complete.html',
                                       allow_repeats=allow_repeats,
                                       worker_id=user.workerid)


@app.route('/worker_complete', methods=['GET'])
def worker_complete():
    """ Complete worker. """
    if 'uniqueId' not in request.args:
        resp = {"status": "bad request"}
        return jsonify(**resp)
    else:
        unique_id = request.args['uniqueId']
        app.logger.info("Completed experiment %s" % unique_id)
        try:
            user = Participant.query.\
                filter(Participant.uniqueid == unique_id).one()
            user.status = COMPLETED
            user.endhit = datetime.datetime.now(datetime.timezone.utc)
            db_session.add(user)
            db_session.commit()
            status = "success"
        except exc.SQLAlchemyError:
            status = "database error"
        resp = {"status": status}
        return jsonify(**resp)


@app.route('/worker_submitted', methods=['GET'])
def worker_submitted():
    """ Submit worker """
    if 'uniqueId' not in request.args:
        resp = {"status": "bad request"}
        return jsonify(**resp)
    else:
        unique_id = request.args['uniqueId']
        app.logger.info("Submitted experiment for %s" % unique_id)
        try:
            user = Participant.query.\
                filter(Participant.uniqueid == unique_id).one()
            user.status = SUBMITTED
            db_session.add(user)
            db_session.commit()
            status = "success"
        except exc.SQLAlchemyError:
            status = "database error"
        resp = {"status": status}
        return jsonify(**resp)


# Is this a security risk?
@app.route("/ppid")
def ppid():
    """ Get ppid """
    proc_id = os.getppid()
    return str(proc_id)


# Insert "mode" into pages so it's carried from page to page done server-side
# to avoid breaking backwards compatibility with old templates.
def insert_mode(page_html):
    """ Insert mode """
    # Append '&mode={{ mode }}' immediately after the LAST occurrence of the
    # workerId template parameter in the page.
    match_found = False
    matches = re.finditer('workerId={{ workerid }}', page_html)
    match = None
    for match in matches:
        match_found = True
    if match_found:
        new_html = page_html[:match.end()] + '&mode={{ mode }}' +\
            page_html[match.end():]
        return new_html
    else:
        raise ExperimentError("insert_mode_failed")


# Generic route
# =============
@app.route('/<path:path>')
def regularpage(path):
    """
    Route not found by the other routes above. May point to a static
    template.
    """
    return send_from_directory('templates', path)


def run_webserver():
    """ Run web server """
    host = CONFIG.get('Server Parameters', 'host')
    port = CONFIG.getint('Server Parameters', 'port')
    print(f"Serving on http://{host}:{port}")
    # NOTE(review): debug=True and template auto-reload look like
    # development settings — confirm before deploying.
    app.config['TEMPLATES_AUTO_RELOAD'] = True
    app.jinja_env.auto_reload = True
    app.run(debug=True, host=host, port=port)


if __name__ == '__main__':
    run_webserver()
mit
zahari/samba
buildtools/wafadmin/Tools/winres.py
21
1286
#!/usr/bin/env python
# encoding: utf-8
# Brant Young, 2007

"This hook is called when the class cpp/cc task generator encounters a '.rc' file: X{.rc -> [.res|.rc.o]}"

import os, sys, re
import TaskGen, Task
from Utils import quote_whitespace
from TaskGen import extension

EXT_WINRC = ['.rc']

# Command template expanded by waf when the 'winrc' task runs.
winrc_str = '${WINRC} ${_CPPDEFFLAGS} ${_CCDEFFLAGS} ${WINRCFLAGS} ${_CPPINCFLAGS} ${_CCINCFLAGS} ${WINRC_TGT_F} ${TGT} ${WINRC_SRC_F} ${SRC}'


@extension(EXT_WINRC)
def rc_file(self, node):
    """Schedule a Windows resource compilation for a .rc source node."""
    # MSVC's rc.exe (signalled by the '/fo' target flag) emits .res files;
    # the GNU windres tool emits regular .rc.o objects.
    obj_ext = '.rc.o'
    if self.env['WINRC_TGT_F'] == '/fo':
        obj_ext = '.res'
    rctask = self.create_task('winrc', node, node.change_ext(obj_ext))
    self.compiled_tasks.append(rctask)

# create our action, for use with rc file
Task.simple_task_type('winrc', winrc_str, color='BLUE', before='cc cxx', shell=False)


def detect(conf):
    """Configure-time detection of the resource compiler (windres or rc.exe)."""
    v = conf.env
    winrc = v['WINRC']
    v['WINRC_TGT_F'] = '-o'
    v['WINRC_SRC_F'] = '-i'
    # find rc.exe
    if not winrc:
        if v['CC_NAME'] in ['gcc', 'cc', 'g++', 'c++']:
            winrc = conf.find_program('windres', var='WINRC',
                                      path_list=v['PATH'])
        elif v['CC_NAME'] == 'msvc':
            winrc = conf.find_program('RC', var='WINRC',
                                      path_list=v['PATH'])
            v['WINRC_TGT_F'] = '/fo'
            v['WINRC_SRC_F'] = ''
    if not winrc:
        conf.fatal('winrc was not found!')
    v['WINRCFLAGS'] = ''
gpl-3.0
mitocw/edx-platform
common/djangoapps/edxmako/tests.py
3
8555
import unittest

import ddt
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.urls import reverse
from edx_django_utils.cache import RequestCache
from mock import Mock, patch

from edxmako import LOOKUP, add_lookup
from edxmako.request_context import get_template_request_context
from edxmako.shortcuts import is_any_marketing_link_set, is_marketing_link_set, marketing_link, render_to_string
from student.tests.factories import UserFactory
from util.testing import UrlResetMixin


@ddt.ddt
class ShortcutsTests(UrlResetMixin, TestCase):
    """
    Test the edxmako shortcuts file
    """
    @override_settings(MKTG_URLS={'ROOT': 'https://dummy-root', 'ABOUT': '/about-us'})
    def test_marketing_link(self):
        with override_settings(MKTG_URL_LINK_MAP={'ABOUT': self._get_test_url_name()}):
            # Marketing site enabled: URL comes from MKTG_URLS.
            with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
                expected_link = 'https://dummy-root/about-us'
                link = marketing_link('ABOUT')
                self.assertEqual(link, expected_link)
            # Marketing site disabled: falls back to Django URL reversing.
            with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': False}):
                expected_link = reverse(self._get_test_url_name())
                link = marketing_link('ABOUT')
                self.assertEqual(link, expected_link)

    @override_settings(MKTG_URLS={'ROOT': 'https://dummy-root', 'ABOUT': '/about-us'})
    def test_is_marketing_link_set(self):
        with override_settings(MKTG_URL_LINK_MAP={'ABOUT': self._get_test_url_name()}):
            # Marketing site enabled.
            with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
                self.assertTrue(is_marketing_link_set('ABOUT'))
                self.assertFalse(is_marketing_link_set('NOT_CONFIGURED'))
            # Marketing site disabled.
            with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': False}):
                self.assertTrue(is_marketing_link_set('ABOUT'))
                self.assertFalse(is_marketing_link_set('NOT_CONFIGURED'))

    @override_settings(MKTG_URLS={'ROOT': 'https://dummy-root', 'ABOUT': '/about-us'})
    def test_is_any_marketing_link_set(self):
        with override_settings(MKTG_URL_LINK_MAP={'ABOUT': self._get_test_url_name()}):
            # Marketing site enabled.
            with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
                self.assertTrue(is_any_marketing_link_set(['ABOUT']))
                self.assertTrue(is_any_marketing_link_set(['ABOUT', 'NOT_CONFIGURED']))
                self.assertFalse(is_any_marketing_link_set(['NOT_CONFIGURED']))
            # Marketing site disabled.
            with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': False}):
                self.assertTrue(is_any_marketing_link_set(['ABOUT']))
                self.assertTrue(is_any_marketing_link_set(['ABOUT', 'NOT_CONFIGURED']))
                self.assertFalse(is_any_marketing_link_set(['NOT_CONFIGURED']))

    def _get_test_url_name(self):
        # Pick a URL name that exists in whichever service we are running in.
        if settings.ROOT_URLCONF == 'lms.urls':
            # return any lms url name
            return 'dashboard'
        else:
            # return any cms url name
            return 'organizations'

    @override_settings(MKTG_URLS={'ROOT': 'https://dummy-root', 'TOS': '/tos'})
    @override_settings(MKTG_URL_OVERRIDES={'TOS': 'https://edx.org'})
    def test_override_marketing_link_valid(self):
        expected_link = 'https://edx.org'
        # An explicit override wins regardless of the marketing-site flag.
        with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
            link = marketing_link('TOS')
            self.assertEqual(link, expected_link)
        with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': False}):
            link = marketing_link('TOS')
            self.assertEqual(link, expected_link)

    @override_settings(MKTG_URLS={'ROOT': 'https://dummy-root', 'TOS': '/tos'})
    @override_settings(MKTG_URL_OVERRIDES={'TOS': '123456'})
    def test_override_marketing_link_invalid(self):
        # An override that is not a valid URL degrades to '#'.
        expected_link = '#'
        with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
            link = marketing_link('TOS')
            self.assertEqual(link, expected_link)
        with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': False}):
            link = marketing_link('TOS')
            self.assertEqual(link, expected_link)


class AddLookupTests(TestCase):
    """
    Test the `add_lookup` function.
    """
    @patch('edxmako.LOOKUP', {})
    def test_with_package(self):
        add_lookup('test', 'management', __name__)
        dirs = LOOKUP['test'].directories
        self.assertEqual(len(dirs), 1)
        self.assertTrue(dirs[0].endswith('management'))


class MakoRequestContextTest(TestCase):
    """
    Test MakoMiddleware.
    """
    def setUp(self):
        super(MakoRequestContextTest, self).setUp()
        self.user = UserFactory.create()
        self.url = "/"
        self.request = RequestFactory().get(self.url)
        self.request.user = self.user
        self.response = Mock(spec=HttpResponse)
        self.addCleanup(RequestCache.clear_all_namespaces)

    def test_with_current_request(self):
        """
        Test that if get_current_request returns a request, then
        get_template_request_context returns a RequestContext.
        """
        with patch('edxmako.request_context.get_current_request', return_value=self.request):
            # requestcontext should not be None.
            self.assertIsNotNone(get_template_request_context())

    def test_without_current_request(self):
        """
        Test that if get_current_request returns None, then
        get_template_request_context returns None.
        """
        with patch('edxmako.request_context.get_current_request', return_value=None):
            # requestcontext should be None.
            self.assertIsNone(get_template_request_context())

    def test_request_context_caching(self):
        """
        Test that the RequestContext is cached in the RequestCache.
        """
        with patch('edxmako.request_context.get_current_request', return_value=None):
            # requestcontext should be None, because the cache isn't filled
            self.assertIsNone(get_template_request_context())

        with patch('edxmako.request_context.get_current_request', return_value=self.request):
            # requestcontext should not be None, and should fill the cache
            self.assertIsNotNone(get_template_request_context())

        mock_get_current_request = Mock()
        with patch('edxmako.request_context.get_current_request'):
            with patch('edxmako.request_context.RequestContext.__init__') as mock_context_init:
                # requestcontext should not be None, because the cache is filled
                self.assertIsNotNone(get_template_request_context())
                mock_context_init.assert_not_called()
        mock_get_current_request.assert_not_called()

        RequestCache.clear_all_namespaces()

        with patch('edxmako.request_context.get_current_request', return_value=None):
            # requestcontext should be None, because the cache isn't filled
            self.assertIsNone(get_template_request_context())

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    def test_render_to_string_when_no_global_context_lms(self):
        """
        Test render_to_string() when makomiddleware has not initialized
        the threadlocal REQUEST_CONTEXT.context. This is meant to run in LMS.
        """
        self.assertIn("this module is temporarily unavailable",
                      render_to_string("courseware/error-message.html", None))

    @unittest.skipUnless(settings.ROOT_URLCONF == 'cms.urls', 'Test only valid in cms')
    def test_render_to_string_when_no_global_context_cms(self):
        """
        Test render_to_string() when makomiddleware has not initialized
        the threadlocal REQUEST_CONTEXT.context. This is meant to run in CMS.
        """
        self.assertIn("We're having trouble rendering your component",
                      render_to_string("html_error.html", None))
agpl-3.0
chemelnucfin/tensorflow
tensorflow/python/data/kernel_tests/iterator_test.py
1
35277
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.Iterator`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import warnings from absl.testing import parameterized import numpy as np from tensorflow.core.protobuf import cluster_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.compat import compat as forward_compat from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import iterator_ops from tensorflow.python.data.util import structure from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import function from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import functional_ops from tensorflow.python.ops import gradients_impl 
from tensorflow.python.ops import math_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import script_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import server_lib from tensorflow.python.util import compat class IteratorTest(test_base.DatasetTestBase, parameterized.TestCase): @test_util.deprecated_graph_mode_only def testNoGradients(self): component = constant_op.constant([1.]) side = constant_op.constant(0.) add = lambda x: x + side dataset = dataset_ops.Dataset.from_tensor_slices(component).map(add) value = dataset_ops.make_one_shot_iterator(dataset).get_next() self.assertIsNone(gradients_impl.gradients(value, component)[0]) self.assertIsNone(gradients_impl.gradients(value, side)[0]) self.assertIsNone(gradients_impl.gradients(value, [component, side])[0]) @test_util.deprecated_graph_mode_only def testCapturingStateInOneShotRaisesException(self): var = variables.Variable(37.0, name="myvar") dataset = ( dataset_ops.Dataset.from_tensor_slices([0.0, 1.0, 2.0]) .map(lambda x: x + var)) with self.assertRaisesRegexp( ValueError, r"`Dataset.make_one_shot_iterator\(\)` does not support " "datasets that capture stateful objects.+myvar"): dataset_ops.make_one_shot_iterator(dataset) @test_util.deprecated_graph_mode_only def testOneShotIterator(self): components = (np.arange(7), np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis], np.array(37.0) * np.arange(7)) def _map_fn(x, y, z): return math_ops.square(x), math_ops.square(y), math_ops.square(z) iterator = dataset_ops.make_one_shot_iterator( dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn) .repeat(14)) get_next = iterator.get_next() self.assertEqual([c.shape[1:] for c in components], [t.shape for t in get_next]) with self.cached_session() as sess: for _ in range(14): for i in range(7): result = sess.run(get_next) for component, result_component in zip(components, result): 
self.assertAllEqual(component[i]**2, result_component) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) @test_util.deprecated_graph_mode_only def testOneShotIteratorCaptureByValue(self): components = (np.arange(7), np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis], np.array(37.0) * np.arange(7)) tensor_components = tuple([ops.convert_to_tensor(c) for c in components]) def _map_fn(x, y, z): return math_ops.square(x), math_ops.square(y), math_ops.square(z) iterator = dataset_ops.make_one_shot_iterator( dataset_ops.Dataset.from_tensor_slices(tensor_components) .map(_map_fn).repeat(14)) get_next = iterator.get_next() self.assertEqual([c.shape[1:] for c in components], [t.shape for t in get_next]) with self.cached_session() as sess: for _ in range(14): for i in range(7): result = sess.run(get_next) for component, result_component in zip(components, result): self.assertAllEqual(component[i]**2, result_component) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) def testOneShotIteratorInsideContainer(self): components = (np.arange(7), np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis], np.array(37.0) * np.arange(7)) def within_container(): def _map_fn(x, y, z): return math_ops.square(x), math_ops.square(y), math_ops.square(z) iterator = dataset_ops.make_one_shot_iterator( dataset_ops.Dataset.from_tensor_slices(components) .map(_map_fn).repeat(14)) return iterator.get_next() server = server_lib.Server.create_local_server() # Create two iterators within unique containers, and run them to # make sure that the resources aren't shared. # # The test below would fail if cname were the same across both # sessions. 
for j in range(2): with session.Session(server.target) as sess: cname = "iteration%d" % j with ops.container(cname): get_next = within_container() for _ in range(14): for i in range(7): result = sess.run(get_next) for component, result_component in zip(components, result): self.assertAllEqual(component[i]**2, result_component) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) @test_util.deprecated_graph_mode_only def testOneShotIteratorNonBlocking(self): dataset = dataset_ops.Dataset.from_tensors([1, 2, 3]).map(lambda x: x * x) iterator = dataset_ops.make_one_shot_iterator(dataset) next_element = iterator.get_next() # Create a session with a single thread to ensure that the # one-shot iterator initializer does not deadlock. config = config_pb2.ConfigProto( inter_op_parallelism_threads=1, use_per_session_threads=True) with session.Session(config=config) as sess: self.assertAllEqual([1, 4, 9], sess.run(next_element)) with self.assertRaises(errors.OutOfRangeError): sess.run(next_element) # Test with multiple threads invoking the one-shot iterator concurrently. with session.Session(config=config) as sess: results = [] def consumer_thread(): try: results.append(sess.run(next_element)) except errors.OutOfRangeError: results.append(None) num_threads = 8 threads = [ self.checkedThread(consumer_thread) for _ in range(num_threads) ] for t in threads: t.start() for t in threads: t.join() self.assertEqual(num_threads, len(results)) self.assertEqual(num_threads - 1, len([None for r in results if r is None])) self.assertAllEqual([[1, 4, 9]], [r for r in results if r is not None]) @test_util.deprecated_graph_mode_only def testOneShotIteratorInitializerFails(self): # Define a dataset whose initialization will always fail. 
dataset = dataset_ops.Dataset.from_tensors(array_ops.gather([0], [4])) iterator = dataset_ops.make_one_shot_iterator(dataset) next_element = iterator.get_next() with self.cached_session() as sess: with self.assertRaisesRegexp(errors.InvalidArgumentError, ""): sess.run(next_element) # Test that subsequent attempts to use the iterator also fail. with self.assertRaisesRegexp(errors.InvalidArgumentError, ""): sess.run(next_element) with self.cached_session() as sess: def consumer_thread(): with self.assertRaisesRegexp(errors.InvalidArgumentError, ""): sess.run(next_element) num_threads = 8 threads = [ self.checkedThread(consumer_thread) for _ in range(num_threads) ] for t in threads: t.start() for t in threads: t.join() @test_util.deprecated_graph_mode_only def testSimpleSharedResource(self): components = (np.array(1, dtype=np.int64), np.array([1, 2, 3], dtype=np.int64), np.array(37.0, dtype=np.float64)) server = server_lib.Server.create_local_server() # Create two non-overlapping sessions that share the same iterator # resource on the same server, and verify that an action of the # first session (initializing the iterator) is visible in the # second session. with ops.Graph().as_default(): iterator = dataset_ops.make_initializable_iterator( dataset_ops.Dataset.from_tensors( components).map(lambda x, y, z: (x, y, z)), shared_name="shared_iterator") init_op = iterator.initializer get_next = iterator.get_next() with session.Session(server.target) as sess: sess.run(init_op) results = sess.run(get_next) for component, result_component in zip(components, results): self.assertAllEqual(component, result_component) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) # Re-initialize the iterator in the first session. sess.run(init_op) with ops.Graph().as_default(): # Re-define the iterator manually, without defining any of the # functions in this graph, to ensure that we are not # accidentally redefining functions with the same names in the # new graph. 
iterator = iterator_ops.Iterator.from_structure( shared_name="shared_iterator", output_types=(dtypes.int64, dtypes.int64, dtypes.float64), output_shapes=([], [3], [])) get_next = iterator.get_next() with session.Session(server.target) as sess: # Use the iterator without re-initializing in the second session. results = sess.run(get_next) for component, result_component in zip(components, results): self.assertAllEqual(component, result_component) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) @test_util.deprecated_graph_mode_only def testNotInitializedError(self): components = (np.array(1), np.array([1, 2, 3]), np.array(37.0)) iterator = dataset_ops.make_initializable_iterator( dataset_ops.Dataset.from_tensors(components)) get_next = iterator.get_next() with self.cached_session() as sess: with self.assertRaisesRegexp(errors.FailedPreconditionError, "iterator has not been initialized"): sess.run(get_next) @test_util.deprecated_graph_mode_only def testReinitializableIterator(self): dataset_3 = dataset_ops.Dataset.from_tensors( constant_op.constant([1, 2, 3])) dataset_4 = dataset_ops.Dataset.from_tensors( constant_op.constant([4, 5, 6, 7])) iterator = iterator_ops.Iterator.from_structure( dataset_ops.get_legacy_output_types(dataset_3), [None]) dataset_3_init_op = iterator.make_initializer(dataset_3) dataset_4_init_op = iterator.make_initializer(dataset_4) get_next = iterator.get_next() self.assertEqual( dataset_ops.get_legacy_output_types(dataset_3), dataset_ops.get_legacy_output_types(iterator)) self.assertEqual( dataset_ops.get_legacy_output_types(dataset_4), dataset_ops.get_legacy_output_types(iterator)) self.assertEqual( [None], dataset_ops.get_legacy_output_shapes(iterator).as_list()) with self.cached_session() as sess: # The iterator is initially uninitialized. with self.assertRaises(errors.FailedPreconditionError): sess.run(get_next) # Initialize with one dataset. 
sess.run(dataset_3_init_op) self.assertAllEqual([1, 2, 3], sess.run(get_next)) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) # Initialize with a different dataset. sess.run(dataset_4_init_op) self.assertAllEqual([4, 5, 6, 7], sess.run(get_next)) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) # Reinitialize with the first dataset. sess.run(dataset_3_init_op) self.assertAllEqual([1, 2, 3], sess.run(get_next)) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) @test_util.deprecated_graph_mode_only def testReinitializableIteratorWithFunctions(self): def g(): for i in range(10): yield i iterator = iterator_ops.Iterator.from_structure(dtypes.int64, []) next_element = iterator.get_next() with self.cached_session() as sess: dataset_1 = dataset_ops.Dataset.from_generator( g, output_types=dtypes.int64) sess.run(iterator.make_initializer(dataset_1)) for expected in range(10): self.assertEqual(expected, sess.run(next_element)) with self.assertRaises(errors.OutOfRangeError): sess.run(next_element) dataset_2 = dataset_ops.Dataset.from_generator( g, output_types=dtypes.int64) sess.run(iterator.make_initializer(dataset_2)) for expected in range(10): self.assertEqual(expected, sess.run(next_element)) with self.assertRaises(errors.OutOfRangeError): sess.run(next_element) def testReinitializableIteratorStaticErrors(self): # Non-matching structure for types and shapes. with self.assertRaises(TypeError): iterator = iterator_ops.Iterator.from_structure( (dtypes.int64, dtypes.float64), [None]) # Test validation of dataset argument. iterator = iterator_ops.Iterator.from_structure((dtypes.int64, dtypes.float64)) # Incompatible structure. with self.assertRaises(ValueError): iterator.make_initializer( dataset_ops.Dataset.from_tensors(((constant_op.constant( [1, 2, 3], dtype=dtypes.int64),), (constant_op.constant( [4., 5., 6., 7.], dtype=dtypes.float64),)))) # Incompatible types. 
with self.assertRaises(TypeError): iterator.make_initializer( dataset_ops.Dataset.from_tensors( (constant_op.constant([1, 2, 3], dtype=dtypes.int32), constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float32)))) # Incompatible shapes. iterator = iterator_ops.Iterator.from_structure( (dtypes.int64, dtypes.float64), ([None], [])) with self.assertRaises(TypeError): iterator.make_initializer( dataset_ops.Dataset.from_tensors( (constant_op.constant([1, 2, 3], dtype=dtypes.int64), constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float64)))) @test_util.deprecated_graph_mode_only def testIteratorStringHandle(self): dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3]) dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40]) iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3) iterator_4 = dataset_ops.make_one_shot_iterator(dataset_4) handle_placeholder = array_ops.placeholder(dtypes.string, shape=[]) feedable_iterator = iterator_ops.Iterator.from_string_handle( handle_placeholder, dataset_ops.get_legacy_output_types(dataset_3), dataset_ops.get_legacy_output_shapes(dataset_3)) next_element = feedable_iterator.get_next() self.assertTrue( structure.are_compatible( dataset_ops.get_structure(dataset_3), dataset_ops.get_structure(feedable_iterator))) with self.cached_session() as sess: iterator_3_handle = sess.run(iterator_3.string_handle()) iterator_4_handle = sess.run(iterator_4.string_handle()) self.assertEqual(10, sess.run( next_element, feed_dict={handle_placeholder: iterator_4_handle})) self.assertEqual(1, sess.run( next_element, feed_dict={handle_placeholder: iterator_3_handle})) self.assertEqual(20, sess.run( next_element, feed_dict={handle_placeholder: iterator_4_handle})) self.assertEqual(2, sess.run( next_element, feed_dict={handle_placeholder: iterator_3_handle})) self.assertEqual(30, sess.run( next_element, feed_dict={handle_placeholder: iterator_4_handle})) self.assertEqual(3, sess.run( next_element, feed_dict={handle_placeholder: 
iterator_3_handle})) self.assertEqual(40, sess.run( next_element, feed_dict={handle_placeholder: iterator_4_handle})) with self.assertRaises(errors.OutOfRangeError): sess.run( next_element, feed_dict={handle_placeholder: iterator_3_handle}) with self.assertRaises(errors.OutOfRangeError): sess.run( next_element, feed_dict={handle_placeholder: iterator_4_handle}) @test_util.deprecated_graph_mode_only def testIteratorStringHandleFuture(self): with forward_compat.forward_compatibility_horizon(2018, 8, 4): dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3]) dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40]) iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3) iterator_4 = dataset_ops.make_one_shot_iterator(dataset_4) handle_placeholder = array_ops.placeholder(dtypes.string, shape=[]) feedable_iterator = iterator_ops.Iterator.from_string_handle( handle_placeholder, dataset_ops.get_legacy_output_types(dataset_3), dataset_ops.get_legacy_output_shapes(dataset_3)) next_element = feedable_iterator.get_next() self.assertTrue( structure.are_compatible( dataset_ops.get_structure(dataset_3), dataset_ops.get_structure(feedable_iterator))) with self.cached_session() as sess: iterator_3_handle = sess.run(iterator_3.string_handle()) iterator_4_handle = sess.run(iterator_4.string_handle()) self.assertEqual( 10, sess.run( next_element, feed_dict={handle_placeholder: iterator_4_handle})) self.assertEqual( 1, sess.run( next_element, feed_dict={handle_placeholder: iterator_3_handle})) self.assertEqual( 20, sess.run( next_element, feed_dict={handle_placeholder: iterator_4_handle})) self.assertEqual( 2, sess.run( next_element, feed_dict={handle_placeholder: iterator_3_handle})) self.assertEqual( 30, sess.run( next_element, feed_dict={handle_placeholder: iterator_4_handle})) self.assertEqual( 3, sess.run( next_element, feed_dict={handle_placeholder: iterator_3_handle})) self.assertEqual( 40, sess.run( next_element, feed_dict={handle_placeholder: 
iterator_4_handle})) with self.assertRaises(errors.OutOfRangeError): sess.run( next_element, feed_dict={handle_placeholder: iterator_3_handle}) with self.assertRaises(errors.OutOfRangeError): sess.run( next_element, feed_dict={handle_placeholder: iterator_4_handle}) @test_util.deprecated_graph_mode_only def testIteratorStringHandleReuseTensorObject(self): dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3]) one_shot_iterator = dataset_ops.make_one_shot_iterator(dataset) initializable_iterator = dataset_ops.make_initializable_iterator(dataset) structure_iterator = iterator_ops.Iterator.from_structure( dataset_ops.get_legacy_output_types(dataset)) created_ops = len(ops.get_default_graph().get_operations()) self.assertIs(one_shot_iterator.string_handle(), one_shot_iterator.string_handle()) self.assertIs(initializable_iterator.string_handle(), initializable_iterator.string_handle()) self.assertIs(structure_iterator.string_handle(), structure_iterator.string_handle()) # Assert that getting the (default) string handle creates no ops. self.assertEqual(created_ops, len(ops.get_default_graph().get_operations())) # Specifying an explicit name will create a new op. 
handle_with_name = one_shot_iterator.string_handle(name="foo") self.assertEqual("foo", handle_with_name.op.name) self.assertIsNot(one_shot_iterator.string_handle(), handle_with_name) handle_with_same_name = one_shot_iterator.string_handle(name="foo") self.assertEqual("foo_1", handle_with_same_name.op.name) self.assertIsNot(handle_with_name, handle_with_same_name) @test_util.deprecated_graph_mode_only def testIteratorStringHandleError(self): dataset_int_scalar = ( dataset_ops.Dataset.from_tensor_slices([1, 2, 3]).repeat()) dataset_float_vector = (dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])) handle_placeholder = array_ops.placeholder(dtypes.string, shape=[]) feedable_int_scalar = iterator_ops.Iterator.from_string_handle( handle_placeholder, dtypes.int32, []) feedable_int_vector = iterator_ops.Iterator.from_string_handle( handle_placeholder, dtypes.int32, [None]) feedable_int_any = iterator_ops.Iterator.from_string_handle( handle_placeholder, dtypes.int32) with self.cached_session() as sess: handle_int_scalar = sess.run(dataset_ops.make_one_shot_iterator( dataset_int_scalar).string_handle()) handle_float_vector = sess.run(dataset_ops.make_one_shot_iterator( dataset_float_vector).string_handle()) self.assertEqual(1, sess.run( feedable_int_scalar.get_next(), feed_dict={handle_placeholder: handle_int_scalar})) self.assertEqual(2, sess.run( feedable_int_any.get_next(), feed_dict={handle_placeholder: handle_int_scalar})) with self.assertRaises(errors.InvalidArgumentError): print(sess.run( feedable_int_vector.get_next(), feed_dict={handle_placeholder: handle_int_scalar})) with self.assertRaises(errors.InvalidArgumentError): print(sess.run( feedable_int_vector.get_next(), feed_dict={handle_placeholder: handle_float_vector})) @test_util.deprecated_graph_mode_only def testRemoteIteratorUsingRemoteCallOpDirectSession(self): worker_config = config_pb2.ConfigProto() worker_config.device_count["CPU"] = 3 with ops.device("/job:localhost/replica:0/task:0/cpu:1"): dataset_3 = 
dataset_ops.Dataset.from_tensor_slices([1, 2, 3]) iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3) iterator_3_handle = iterator_3.string_handle() @function.Defun(dtypes.string) def _remote_fn(h): remote_iterator = iterator_ops.Iterator.from_string_handle( h, dataset_ops.get_legacy_output_types(dataset_3), dataset_ops.get_legacy_output_shapes(dataset_3)) return remote_iterator.get_next() with ops.device("/job:localhost/replica:0/task:0/cpu:0"): target_placeholder = array_ops.placeholder(dtypes.string, shape=[]) remote_op = functional_ops.remote_call( args=[iterator_3_handle], Tout=[dtypes.int32], f=_remote_fn, target=target_placeholder) with self.session(config=worker_config) as sess: elem = sess.run( remote_op, feed_dict={ target_placeholder: "/job:localhost/replica:0/task:0/cpu:1" }) self.assertEqual(elem, [1]) # Fails when target is cpu:2 where the resource is not located. with self.assertRaises(errors.InvalidArgumentError): sess.run( remote_op, feed_dict={ target_placeholder: "/job:localhost/replica:0/task:0/cpu:2" }) elem = sess.run( remote_op, feed_dict={ target_placeholder: "/job:localhost/replica:0/task:0/cpu:1" }) self.assertEqual(elem, [2]) elem = sess.run( remote_op, feed_dict={ target_placeholder: "/job:localhost/replica:0/task:0/cpu:1" }) self.assertEqual(elem, [3]) with self.assertRaises(errors.OutOfRangeError): sess.run( remote_op, feed_dict={ target_placeholder: "/job:localhost/replica:0/task:0/cpu:1" }) @test_util.deprecated_graph_mode_only def testRemoteIteratorUsingRemoteCallOpMultiWorkers(self): s1 = server_lib.Server.create_local_server() s2 = server_lib.Server.create_local_server() s3 = server_lib.Server.create_local_server() cluster_def = cluster_pb2.ClusterDef() workers = cluster_def.job.add() workers.name = "worker" workers.tasks[0] = s1.target[len("grpc://"):] workers.tasks[1] = s2.target[len("grpc://"):] client = cluster_def.job.add() client.name = "client" client.tasks[0] = s3.target[len("grpc://"):] config = 
config_pb2.ConfigProto(cluster_def=cluster_def) worker_devices = [ "/job:worker/replica:0/task:%d/cpu:0" % i for i in range(2) ] itr_handles = [] for device in worker_devices: with ops.device(device): src = dataset_ops.Dataset.from_tensor_slices([device]) itr = dataset_ops.make_one_shot_iterator(src) itr_handles.append(itr.string_handle()) targets = dataset_ops.Dataset.from_tensor_slices(worker_devices) handles = dataset_ops.Dataset.from_tensor_slices(itr_handles) @function.Defun(dtypes.string) def loading_func(h): remote_itr = iterator_ops.Iterator.from_string_handle( h, dataset_ops.get_legacy_output_types(itr), dataset_ops.get_legacy_output_shapes(itr)) return remote_itr.get_next() def map_fn(target, handle): return functional_ops.remote_call( args=[handle], Tout=[dtypes.string], f=loading_func, target=target) with ops.device("/job:client"): client_dataset = dataset_ops.Dataset.zip((targets, handles)).map(map_fn) itr = dataset_ops.make_initializable_iterator(client_dataset) n = itr.get_next() with session.Session(s3.target, config=config) as sess: sess.run(itr.initializer) expected_values = worker_devices for expected in expected_values: self.assertEqual((compat.as_bytes(expected),), sess.run(n)) with self.assertRaises(errors.OutOfRangeError): sess.run(n) @test_util.deprecated_graph_mode_only def testRemoteIteratorUsingRemoteCallOpDirectSessionGPUCPU(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") with ops.device("/job:localhost/replica:0/task:0/cpu:0"): dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3]) iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3) iterator_3_handle = iterator_3.string_handle() def _encode_raw(byte_array): return bytes(bytearray(byte_array)) @function.Defun(dtypes.uint8) def _remote_fn(h): handle = script_ops.py_func(_encode_raw, [h], dtypes.string) remote_iterator = iterator_ops.Iterator.from_string_handle( handle, dataset_ops.get_legacy_output_types(dataset_3), 
dataset_ops.get_legacy_output_shapes(dataset_3)) return remote_iterator.get_next() with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"): target_placeholder = array_ops.placeholder(dtypes.string, shape=[]) iterator_3_handle_uint8 = parsing_ops.decode_raw( input_bytes=iterator_3_handle, out_type=dtypes.uint8) remote_op = functional_ops.remote_call( args=[iterator_3_handle_uint8], Tout=[dtypes.int32], f=_remote_fn, target=target_placeholder) with self.cached_session() as sess: elem = sess.run( remote_op, feed_dict={ target_placeholder: "/job:localhost/replica:0/task:0/cpu:0" }) self.assertEqual(elem, [1]) elem = sess.run( remote_op, feed_dict={ target_placeholder: "/job:localhost/replica:0/task:0/cpu:0" }) self.assertEqual(elem, [2]) elem = sess.run( remote_op, feed_dict={ target_placeholder: "/job:localhost/replica:0/task:0/cpu:0" }) self.assertEqual(elem, [3]) with self.assertRaises(errors.OutOfRangeError): sess.run( remote_op, feed_dict={ target_placeholder: "/job:localhost/replica:0/task:0/cpu:0" }) @test_util.deprecated_graph_mode_only def testRepeatedGetNextWarning(self): iterator = dataset_ops.make_one_shot_iterator(dataset_ops.Dataset.range(10)) warnings.simplefilter("always") with warnings.catch_warnings(record=True) as w: for _ in range(100): iterator.get_next() self.assertEqual(100 - iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD, len(w)) for warning in w: self.assertIn( iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE, str(warning.message)) # pylint: disable=g-long-lambda @parameterized.named_parameters( ("Tensor", lambda: constant_op.constant(37.0), tensor_spec.TensorSpec([], dtypes.float32), ops.Tensor, dtypes.float32, []), ("SparseTensor", lambda: sparse_tensor.SparseTensor( indices=[[0]], values=constant_op.constant([0], dtype=dtypes.int32), dense_shape=[1]), sparse_tensor.SparseTensorSpec([1], dtypes.int32), sparse_tensor.SparseTensor, dtypes.int32, [1]), ("Nest", lambda: { "a": constant_op.constant(37.0), "b": (constant_op.constant(["Foo"]), 
constant_op.constant("Bar")) }, { "a": tensor_spec.TensorSpec([], dtypes.float32), "b": (tensor_spec.TensorSpec( [1], dtypes.string), tensor_spec.TensorSpec([], dtypes.string)) }, { "a": ops.Tensor, "b": (ops.Tensor, ops.Tensor) }, { "a": dtypes.float32, "b": (dtypes.string, dtypes.string) }, { "a": [], "b": ([1], []) }), ) def testIteratorStructure(self, tf_value_fn, expected_element_structure, expected_output_classes, expected_output_types, expected_output_shapes): tf_value = tf_value_fn() iterator = dataset_ops.make_one_shot_iterator( dataset_ops.Dataset.from_tensors(tf_value)) self.assertTrue( structure.are_compatible( dataset_ops.get_structure(iterator), expected_element_structure)) self.assertEqual(expected_output_classes, dataset_ops.get_legacy_output_classes(iterator)) self.assertEqual(expected_output_types, dataset_ops.get_legacy_output_types(iterator)) self.assertEqual(expected_output_shapes, dataset_ops.get_legacy_output_shapes(iterator)) def testIteratorGetNextName(self): with ops.Graph().as_default(): iterator = dataset_ops.make_one_shot_iterator( dataset_ops.Dataset.from_tensors(37.0)) next_element = iterator.get_next(name="overridden_name") self.assertEqual("overridden_name", next_element.op.name) @parameterized.named_parameters( ("Async", context.ASYNC), ("Sync", context.SYNC), ) def testIteratorEagerIteration(self, execution_mode): with context.eager_mode(), context.execution_mode(execution_mode): val = 0 dataset = dataset_ops.Dataset.range(10) iterator = iter(dataset) for foo in iterator: self.assertEqual(val, foo.numpy()) val += 1 @test_util.run_v2_only def testIteratorV2Function(self): queue = data_flow_ops.FIFOQueue(10, dtypes.int64) @def_function.function def fn(): dataset = dataset_ops.Dataset.range(10) iterator = iter(dataset) for _ in range(10): queue.enqueue(next(iterator)) fn() for i in range(10): self.assertEqual(queue.dequeue().numpy(), i) @test_util.run_v2_only def testIteratorV2FunctionError(self): # In this test we verify that a 
function that raises an error ends up # properly deallocating the iterator resource. queue = data_flow_ops.FIFOQueue(10, dtypes.int64) queue.enqueue(0) def init_fn(n): return n def next_fn(_): ds = dataset_ops.Dataset.range(0) return next(iter(ds)) def finalize_fn(n): queue.enqueue(0) return n @def_function.function def fn(): dataset = dataset_ops._GeneratorDataset(1, init_fn, next_fn, finalize_fn) iterator = iter(dataset) next(iterator) with self.assertRaises(errors.OutOfRangeError): fn() self.assertEqual(queue.size().numpy(), 2) @test_util.run_v2_only def testLimitedRetracing(self): trace_count = [0] @def_function.function def f(iterator): trace_count[0] += 1 counter = np.int64(0) for elem in iterator: counter += elem return counter dataset = dataset_ops.Dataset.range(5) dataset2 = dataset_ops.Dataset.range(10) for _ in range(10): self.assertEqual(self.evaluate(f(iter(dataset))), 10) self.assertEqual(self.evaluate(f(iter(dataset2))), 45) self.assertEqual(trace_count[0], 1) if __name__ == "__main__": test.main()
apache-2.0
mikel-egana-aranguren/SADI-Galaxy-Docker
galaxy-dist/lib/galaxy/objectstore/__init__.py
2
29544
""" objectstore package, abstraction for storing blobs of data for use in Galaxy, all providers ensure that data can be accessed on the filesystem for running tools """ import os import random import shutil import logging import threading from xml.etree import ElementTree from galaxy.util import umask_fix_perms, force_symlink from galaxy.exceptions import ObjectInvalid, ObjectNotFound from galaxy.util.sleeper import Sleeper from galaxy.util.directory_hash import directory_hash_id from galaxy.util.odict import odict try: from sqlalchemy.orm import object_session except ImportError: object_session = None NO_SESSION_ERROR_MESSAGE = "Attempted to 'create' object store entity in configuration with no database session present." log = logging.getLogger( __name__ ) class ObjectStore(object): """ ObjectStore abstract interface """ def __init__(self, config, config_xml=None, **kwargs): self.running = True self.extra_dirs = {} def shutdown(self): self.running = False def exists(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None): """ Returns True if the object identified by `obj` exists in this file store, False otherwise. FIELD DESCRIPTIONS (these apply to all the methods in this class): :type obj: object :param obj: A Galaxy object with an assigned database ID accessible via the .id attribute. :type base_dir: string :param base_dir: A key in self.extra_dirs corresponding to the base directory in which this object should be created, or None to specify the default directory. :type dir_only: bool :param dir_only: If True, check only the path where the file identified by `obj` should be located, not the dataset itself. This option applies to `extra_dir` argument as well. :type extra_dir: string :param extra_dir: Append `extra_dir` to the directory structure where the dataset identified by `obj` should be located. 
(e.g., 000/extra_dir/obj.id) :type extra_dir_at_root: bool :param extra_dir_at_root: Applicable only if `extra_dir` is set. If True, the `extra_dir` argument is placed at root of the created directory structure rather than at the end (e.g., extra_dir/000/obj.id vs. 000/extra_dir/obj.id) :type alt_name: string :param alt_name: Use this name as the alternative name for the created dataset rather than the default. """ raise NotImplementedError() def file_ready(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None): """ A helper method that checks if a file corresponding to a dataset is ready and available to be used. Return True if so, False otherwise.""" return True def create(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None): """ Mark the object identified by `obj` as existing in the store, but with no content. This method will create a proper directory structure for the file if the directory does not already exist. See `exists` method for the description of other fields. """ raise NotImplementedError() def empty(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None): """ Test if the object identified by `obj` has content. If the object does not exist raises `ObjectNotFound`. See `exists` method for the description of the fields. """ raise NotImplementedError() def size(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None): """ Return size of the object identified by `obj`. If the object does not exist, return 0. See `exists` method for the description of the fields. """ raise NotImplementedError() def delete(self, obj, entire_dir=False, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None): """ Deletes the object identified by `obj`. See `exists` method for the description of other fields. :type entire_dir: bool :param entire_dir: If True, delete the entire directory pointed to by extra_dir. 
For safety reasons, this option applies only for and in conjunction with the extra_dir option. """ raise NotImplementedError() def get_data(self, obj, start=0, count=-1, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None): """ Fetch `count` bytes of data starting at offset `start` from the object identified uniquely by `obj`. If the object does not exist raises `ObjectNotFound`. See `exists` method for the description of other fields. :type start: int :param start: Set the position to start reading the dataset file :type count: int :param count: Read at most `count` bytes from the dataset """ raise NotImplementedError() def get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None): """ Get the expected filename (including the absolute path) which can be used to access the contents of the object uniquely identified by `obj`. See `exists` method for the description of the fields. """ raise NotImplementedError() def update_from_file(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, file_name=None, create=False): """ Inform the store that the file associated with the object has been updated. If `file_name` is provided, update from that file instead of the default. If the object does not exist raises `ObjectNotFound`. See `exists` method for the description of other fields. :type file_name: string :param file_name: Use file pointed to by `file_name` as the source for updating the dataset identified by `obj` :type create: bool :param create: If True and the default dataset does not exist, create it first. """ raise NotImplementedError() def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None): """ If the store supports direct URL access, return a URL. Otherwise return None. Note: need to be careful to to bypass dataset security with this. See `exists` method for the description of the fields. 
""" raise NotImplementedError() def get_store_usage_percent(self): """ Return the percentage indicating how full the store is """ raise NotImplementedError() ## def get_staging_command( id ): ## """ ## Return a shell command that can be prepended to the job script to stage the ## dataset -- runs on worker nodes. ## ## Note: not sure about the interface here. Should this return a filename, command ## tuple? Is this even a good idea, seems very useful for S3, other object stores? ## """ class DiskObjectStore(ObjectStore): """ Standard Galaxy object store, stores objects in files under a specific directory on disk. >>> from galaxy.util.bunch import Bunch >>> import tempfile >>> file_path=tempfile.mkdtemp() >>> obj = Bunch(id=1) >>> s = DiskObjectStore(Bunch(umask=077, job_working_directory=file_path, new_file_path=file_path, object_store_check_old_style=False), file_path=file_path) >>> s.create(obj) >>> s.exists(obj) True >>> assert s.get_filename(obj) == file_path + '/000/dataset_1.dat' """ def __init__(self, config, config_xml=None, file_path=None, extra_dirs=None): super(DiskObjectStore, self).__init__(config, config_xml=None, file_path=file_path, extra_dirs=extra_dirs) self.file_path = file_path or config.file_path self.config = config self.check_old_style = config.object_store_check_old_style self.extra_dirs['job_work'] = config.job_working_directory self.extra_dirs['temp'] = config.new_file_path #The new config_xml overrides universe settings. if config_xml is not None: for e in config_xml: if e.tag == 'files_dir': self.file_path = e.get('path') else: self.extra_dirs[e.tag] = e.get('path') if extra_dirs is not None: self.extra_dirs.update( extra_dirs ) def _get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None): """Class method that returns the absolute path for the file corresponding to the `obj`.id regardless of whether the file exists. 
""" path = self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name, old_style=True) # For backward compatibility, check the old style root path first; otherwise, # construct hashed path if not os.path.exists(path): return self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name) # TODO: rename to _disk_path or something like that to avoid conflicts with children that'll use the local_extra_dirs decorator, e.g. S3 def _construct_path(self, obj, old_style=False, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, **kwargs): """ Construct the expected absolute path for accessing the object identified by `obj`.id. :type base_dir: string :param base_dir: A key in self.extra_dirs corresponding to the base directory in which this object should be created, or None to specify the default directory. :type dir_only: bool :param dir_only: If True, check only the path where the file identified by `obj` should be located, not the dataset itself. This option applies to `extra_dir` argument as well. :type extra_dir: string :param extra_dir: Append the value of this parameter to the expected path used to access the object identified by `obj` (e.g., /files/000/<extra_dir>/dataset_10.dat). :type alt_name: string :param alt_name: Use this name as the alternative name for the returned dataset rather than the default. :type old_style: bool param old_style: This option is used for backward compatibility. If True the composed directory structure does not include a hash id (e.g., /files/dataset_10.dat (old) vs. 
/files/000/dataset_10.dat (new)) """ base = self.extra_dirs.get(base_dir, self.file_path) if old_style: if extra_dir is not None: path = os.path.join(base, extra_dir) else: path = base else: # Construct hashed path rel_path = os.path.join(*directory_hash_id(obj.id)) # Optionally append extra_dir if extra_dir is not None: if extra_dir_at_root: rel_path = os.path.join(extra_dir, rel_path) else: rel_path = os.path.join(rel_path, extra_dir) path = os.path.join(base, rel_path) if not dir_only: path = os.path.join(path, alt_name if alt_name else "dataset_%s.dat" % obj.id) return os.path.abspath(path) def exists(self, obj, **kwargs): if self.check_old_style: path = self._construct_path(obj, old_style=True, **kwargs) # For backward compatibility, check root path first; otherwise, construct # and check hashed path if os.path.exists(path): return True return os.path.exists(self._construct_path(obj, **kwargs)) def create(self, obj, **kwargs): if not self.exists(obj, **kwargs): path = self._construct_path(obj, **kwargs) dir_only = kwargs.get('dir_only', False) # Create directory if it does not exist dir = path if dir_only else os.path.dirname(path) if not os.path.exists(dir): os.makedirs(dir) # Create the file if it does not exist if not dir_only: open(path, 'w').close() # Should be rb? 
umask_fix_perms(path, self.config.umask, 0666) def empty(self, obj, **kwargs): return os.path.getsize(self.get_filename(obj, **kwargs)) == 0 def size(self, obj, **kwargs): if self.exists(obj, **kwargs): try: return os.path.getsize(self.get_filename(obj, **kwargs)) except OSError: return 0 else: return 0 def delete(self, obj, entire_dir=False, **kwargs): path = self.get_filename(obj, **kwargs) extra_dir = kwargs.get('extra_dir', None) try: if entire_dir and extra_dir: shutil.rmtree(path) return True if self.exists(obj, **kwargs): os.remove(path) return True except OSError, ex: log.critical('%s delete error %s' % (self._get_filename(obj, **kwargs), ex)) return False def get_data(self, obj, start=0, count=-1, **kwargs): data_file = open(self.get_filename(obj, **kwargs), 'r') # Should be rb? data_file.seek(start) content = data_file.read(count) data_file.close() return content def get_filename(self, obj, **kwargs): if self.check_old_style: path = self._construct_path(obj, old_style=True, **kwargs) # For backward compatibility, check root path first; otherwise, construct # and return hashed path if os.path.exists(path): return path return self._construct_path(obj, **kwargs) def update_from_file(self, obj, file_name=None, create=False, **kwargs): """ `create` parameter is not used in this implementation """ preserve_symlinks = kwargs.pop( 'preserve_symlinks', False ) #FIXME: symlinks and the object store model may not play well together #these should be handled better, e.g. 
registering the symlink'd file as an object if create: self.create(obj, **kwargs) if file_name and self.exists(obj, **kwargs): try: if preserve_symlinks and os.path.islink( file_name ): force_symlink( os.readlink( file_name ), self.get_filename( obj, **kwargs ) ) else: shutil.copy( file_name, self.get_filename( obj, **kwargs ) ) except IOError, ex: log.critical('Error copying %s to %s: %s' % (file_name, self._get_filename(obj, **kwargs), ex)) raise ex def get_object_url(self, obj, **kwargs): return None def get_store_usage_percent(self): st = os.statvfs(self.file_path) return ( float( st.f_blocks - st.f_bavail ) / st.f_blocks ) * 100 class CachingObjectStore(ObjectStore): """ Object store that uses a directory for caching files, but defers and writes back to another object store. """ def __init__(self, path, backend): super(CachingObjectStore, self).__init__(self, path, backend) class NestedObjectStore(ObjectStore): """ Base for ObjectStores that use other ObjectStores (DistributedObjectStore, HierarchicalObjectStore) """ def __init__(self, config, config_xml=None): super(NestedObjectStore, self).__init__(config, config_xml=config_xml) self.backends = {} def shutdown(self): for store in self.backends.values(): store.shutdown() super(NestedObjectStore, self).shutdown() def exists(self, obj, **kwargs): return self.__call_method('exists', obj, False, False, **kwargs) def file_ready(self, obj, **kwargs): return self.__call_method('file_ready', obj, False, False, **kwargs) def create(self, obj, **kwargs): random.choice(self.backends.values()).create(obj, **kwargs) def empty(self, obj, **kwargs): return self.__call_method('empty', obj, True, False, **kwargs) def size(self, obj, **kwargs): return self.__call_method('size', obj, 0, False, **kwargs) def delete(self, obj, **kwargs): return self.__call_method('delete', obj, False, False, **kwargs) def get_data(self, obj, **kwargs): return self.__call_method('get_data', obj, ObjectNotFound, True, **kwargs) def 
get_filename(self, obj, **kwargs): return self.__call_method('get_filename', obj, ObjectNotFound, True, **kwargs) def update_from_file(self, obj, **kwargs): if kwargs.get('create', False): self.create(obj, **kwargs) kwargs['create'] = False return self.__call_method('update_from_file', obj, ObjectNotFound, True, **kwargs) def get_object_url(self, obj, **kwargs): return self.__call_method('get_object_url', obj, None, False, **kwargs) def __call_method(self, method, obj, default, default_is_exception, **kwargs): """ Check all children object stores for the first one with the dataset """ for key, store in self.backends.items(): if store.exists(obj, **kwargs): return store.__getattribute__(method)(obj, **kwargs) if default_is_exception: raise default( 'objectstore, __call_method failed: %s on %s, kwargs: %s' % ( method, str( obj ), str( kwargs ) ) ) else: return default class DistributedObjectStore(NestedObjectStore): """ ObjectStore that defers to a list of backends, for getting objects the first store where the object exists is used, objects are created in a store selected randomly, but with weighting. 
""" def __init__(self, config, config_xml=None, fsmon=False): super(DistributedObjectStore, self).__init__(config, config_xml=config_xml) if config_xml is None: self.distributed_config = config.distributed_object_store_config_file assert self.distributed_config is not None, "distributed object store ('object_store = distributed') " \ "requires a config file, please set one in " \ "'distributed_object_store_config_file')" self.backends = {} self.weighted_backend_ids = [] self.original_weighted_backend_ids = [] self.max_percent_full = {} self.global_max_percent_full = 0.0 random.seed() self.__parse_distributed_config(config, config_xml) self.sleeper = None if fsmon and ( self.global_max_percent_full or filter( lambda x: x != 0.0, self.max_percent_full.values() ) ): self.sleeper = Sleeper() self.filesystem_monitor_thread = threading.Thread(target=self.__filesystem_monitor) self.filesystem_monitor_thread.setDaemon( True ) self.filesystem_monitor_thread.start() log.info("Filesystem space monitor started") def __parse_distributed_config(self, config, config_xml=None): if config_xml is None: root = ElementTree.parse(self.distributed_config).getroot() log.debug('Loading backends for distributed object store from %s' % self.distributed_config) else: root = config_xml.find('backends') log.debug('Loading backends for distributed object store from %s' % config_xml.get('id')) self.global_max_percent_full = float(root.get('maxpctfull', 0)) for elem in [ e for e in root if e.tag == 'backend' ]: id = elem.get('id') weight = int(elem.get('weight', 1)) maxpctfull = float(elem.get('maxpctfull', 0)) if elem.get('type', 'disk'): path = None extra_dirs = {} for sub in elem: if sub.tag == 'files_dir': path = sub.get('path') elif sub.tag == 'extra_dir': type = sub.get('type') extra_dirs[type] = sub.get('path') self.backends[id] = DiskObjectStore(config, file_path=path, extra_dirs=extra_dirs) self.max_percent_full[id] = maxpctfull log.debug("Loaded disk backend '%s' with weight %s and 
file_path: %s" % (id, weight, path)) if extra_dirs: log.debug(" Extra directories:") for type, dir in extra_dirs.items(): log.debug(" %s: %s" % (type, dir)) for i in range(0, weight): # The simplest way to do weighting: add backend ids to a # sequence the number of times equalling weight, then randomly # choose a backend from that sequence at creation self.weighted_backend_ids.append(id) self.original_weighted_backend_ids = self.weighted_backend_ids def shutdown(self): super(DistributedObjectStore, self).shutdown() if self.sleeper is not None: self.sleeper.wake() def __filesystem_monitor(self): while self.running: new_weighted_backend_ids = self.original_weighted_backend_ids for id, backend in self.backends.items(): maxpct = self.max_percent_full[id] or self.global_max_percent_full pct = backend.get_store_usage_percent() if pct > maxpct: new_weighted_backend_ids = filter(lambda x: x != id, new_weighted_backend_ids) self.weighted_backend_ids = new_weighted_backend_ids self.sleeper.sleep(120) # Test free space every 2 minutes def create(self, obj, **kwargs): """ create() is the only method in which obj.object_store_id may be None """ if obj.object_store_id is None or not self.exists(obj, **kwargs): if obj.object_store_id is None or obj.object_store_id not in self.weighted_backend_ids: try: obj.object_store_id = random.choice(self.weighted_backend_ids) except IndexError: raise ObjectInvalid( 'objectstore.create, could not generate obj.object_store_id: %s, kwargs: %s' % ( str( obj ), str( kwargs ) ) ) create_object_in_session( obj ) log.debug("Selected backend '%s' for creation of %s %s" % (obj.object_store_id, obj.__class__.__name__, obj.id)) else: log.debug("Using preferred backend '%s' for creation of %s %s" % (obj.object_store_id, obj.__class__.__name__, obj.id)) self.backends[obj.object_store_id].create(obj, **kwargs) def __call_method(self, method, obj, default, default_is_exception, **kwargs): object_store_id = self.__get_store_id_for(obj, **kwargs) if 
object_store_id is not None: return self.backends[object_store_id].__getattribute__(method)(obj, **kwargs) if default_is_exception: raise default( 'objectstore, __call_method failed: %s on %s, kwargs: %s' % ( method, str( obj ), str( kwargs ) ) ) else: return default def __get_store_id_for(self, obj, **kwargs): if obj.object_store_id is not None and obj.object_store_id in self.backends: return obj.object_store_id else: # if this instance has been switched from a non-distributed to a # distributed object store, or if the object's store id is invalid, # try to locate the object log.warning('The backend object store ID (%s) for %s object with ID %s is invalid' % (obj.object_store_id, obj.__class__.__name__, obj.id)) for id, store in self.backends.items(): if store.exists(obj, **kwargs): log.warning('%s object with ID %s found in backend object store with ID %s' % (obj.__class__.__name__, obj.id, id)) obj.object_store_id = id create_object_in_session( obj ) return id return None class HierarchicalObjectStore(NestedObjectStore): """ ObjectStore that defers to a list of backends, for getting objects the first store where the object exists is used, objects are always created in the first store. 
""" def __init__(self, config, config_xml=None, fsmon=False): super(HierarchicalObjectStore, self).__init__(config, config_xml=config_xml) self.backends = odict() for b in sorted(config_xml.find('backends'), key=lambda b: int(b.get('order'))): self.backends[int(b.get('order'))] = build_object_store_from_config(config, fsmon=fsmon, config_xml=b) def exists(self, obj, **kwargs): """ Exists must check all child object stores """ for store in self.backends.values(): if store.exists(obj, **kwargs): return True return False def create(self, obj, **kwargs): """ Create will always be called by the primary object_store """ self.backends[0].create(obj, **kwargs) def build_object_store_from_config(config, fsmon=False, config_xml=None): """ Depending on the configuration setting, invoke the appropriate object store """ if config_xml is None and os.path.exists( config.object_store_config_file ): # This is a top level invocation of build_object_store_from_config, and # we have an object_store_conf.xml -- read the .xml and build # accordingly root = ElementTree.parse(config.object_store_config_file).getroot() store = root.get('type') config_xml = root elif config_xml is not None: store = config_xml.get('type') else: store = config.object_store if store == 'disk': return DiskObjectStore(config=config, config_xml=config_xml) elif store == 's3': from .s3 import S3ObjectStore return S3ObjectStore(config=config, config_xml=config_xml) elif store == 'swift': from .s3 import SwiftObjectStore return SwiftObjectStore(config=config, config_xml=config_xml) elif store == 'distributed': return DistributedObjectStore(config=config, fsmon=fsmon, config_xml=config_xml) elif store == 'hierarchical': return HierarchicalObjectStore(config=config, config_xml=config_xml) elif store == 'irods': from .rods import IRODSObjectStore return IRODSObjectStore(config=config, config_xml=config_xml) elif store == 'pulsar': from .pulsar import PulsarObjectStore return PulsarObjectStore(config=config, 
config_xml=config_xml) else: log.error("Unrecognized object store definition: {0}".format(store)) def local_extra_dirs( func ): """ A decorator for non-local plugins to utilize local directories for their extra_dirs (job_working_directory and temp). """ def wraps( self, *args, **kwargs ): if kwargs.get( 'base_dir', None ) is None: return func( self, *args, **kwargs ) else: for c in self.__class__.__mro__: if c.__name__ == 'DiskObjectStore': return getattr( c, func.__name__ )( self, *args, **kwargs ) raise Exception( "Could not call DiskObjectStore's %s method, does your Object Store plugin inherit from DiskObjectStore?" % func.__name__ ) return wraps def convert_bytes(bytes): """ A helper function used for pretty printing disk usage """ if bytes is None: bytes = 0 bytes = float(bytes) if bytes >= 1099511627776: terabytes = bytes / 1099511627776 size = '%.2fTB' % terabytes elif bytes >= 1073741824: gigabytes = bytes / 1073741824 size = '%.2fGB' % gigabytes elif bytes >= 1048576: megabytes = bytes / 1048576 size = '%.2fMB' % megabytes elif bytes >= 1024: kilobytes = bytes / 1024 size = '%.2fKB' % kilobytes else: size = '%.2fb' % bytes return size def create_object_in_session( obj ): session = object_session( obj ) if object_session is not None else None if session is not None: object_session( obj ).add( obj ) object_session( obj ).flush() else: raise Exception( NO_SESSION_ERROR_MESSAGE )
gpl-3.0
studio666/gnuradio
gr-utils/python/modtool/gr-newmod/docs/doxygen/doxyxml/base.py
333
6794
# # Copyright 2010 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # """ A base class is created. Classes based upon this are used to make more user-friendly interfaces to the doxygen xml docs than the generated classes provide. """ import os import pdb from xml.parsers.expat import ExpatError from generated import compound class Base(object): class Duplicate(StandardError): pass class NoSuchMember(StandardError): pass class ParsingError(StandardError): pass def __init__(self, parse_data, top=None): self._parsed = False self._error = False self._parse_data = parse_data self._members = [] self._dict_members = {} self._in_category = {} self._data = {} if top is not None: self._xml_path = top._xml_path # Set up holder of references else: top = self self._refs = {} self._xml_path = parse_data self.top = top @classmethod def from_refid(cls, refid, top=None): """ Instantiate class from a refid rather than parsing object. """ # First check to see if its already been instantiated. if top is not None and refid in top._refs: return top._refs[refid] # Otherwise create a new instance and set refid. 
inst = cls(None, top=top) inst.refid = refid inst.add_ref(inst) return inst @classmethod def from_parse_data(cls, parse_data, top=None): refid = getattr(parse_data, 'refid', None) if refid is not None and top is not None and refid in top._refs: return top._refs[refid] inst = cls(parse_data, top=top) if refid is not None: inst.refid = refid inst.add_ref(inst) return inst def add_ref(self, obj): if hasattr(obj, 'refid'): self.top._refs[obj.refid] = obj mem_classes = [] def get_cls(self, mem): for cls in self.mem_classes: if cls.can_parse(mem): return cls raise StandardError(("Did not find a class for object '%s'." \ % (mem.get_name()))) def convert_mem(self, mem): try: cls = self.get_cls(mem) converted = cls.from_parse_data(mem, self.top) if converted is None: raise StandardError('No class matched this object.') self.add_ref(converted) return converted except StandardError, e: print e @classmethod def includes(cls, inst): return isinstance(inst, cls) @classmethod def can_parse(cls, obj): return False def _parse(self): self._parsed = True def _get_dict_members(self, cat=None): """ For given category a dictionary is returned mapping member names to members of that category. For names that are duplicated the name is mapped to None. """ self.confirm_no_error() if cat not in self._dict_members: new_dict = {} for mem in self.in_category(cat): if mem.name() not in new_dict: new_dict[mem.name()] = mem else: new_dict[mem.name()] = self.Duplicate self._dict_members[cat] = new_dict return self._dict_members[cat] def in_category(self, cat): self.confirm_no_error() if cat is None: return self._members if cat not in self._in_category: self._in_category[cat] = [mem for mem in self._members if cat.includes(mem)] return self._in_category[cat] def get_member(self, name, cat=None): self.confirm_no_error() # Check if it's in a namespace or class. 
bits = name.split('::') first = bits[0] rest = '::'.join(bits[1:]) member = self._get_dict_members(cat).get(first, self.NoSuchMember) # Raise any errors that are returned. if member in set([self.NoSuchMember, self.Duplicate]): raise member() if rest: return member.get_member(rest, cat=cat) return member def has_member(self, name, cat=None): try: mem = self.get_member(name, cat=cat) return True except self.NoSuchMember: return False def data(self): self.confirm_no_error() return self._data def members(self): self.confirm_no_error() return self._members def process_memberdefs(self): mdtss = [] for sec in self._retrieved_data.compounddef.sectiondef: mdtss += sec.memberdef # At the moment we lose all information associated with sections. # Sometimes a memberdef is in several sectiondef. # We make sure we don't get duplicates here. uniques = set([]) for mem in mdtss: converted = self.convert_mem(mem) pair = (mem.name, mem.__class__) if pair not in uniques: uniques.add(pair) self._members.append(converted) def retrieve_data(self): filename = os.path.join(self._xml_path, self.refid + '.xml') try: self._retrieved_data = compound.parse(filename) except ExpatError: print('Error in xml in file %s' % filename) self._error = True self._retrieved_data = None def check_parsed(self): if not self._parsed: self._parse() def confirm_no_error(self): self.check_parsed() if self._error: raise self.ParsingError() def error(self): self.check_parsed() return self._error def name(self): # first see if we can do it without processing. if self._parse_data is not None: return self._parse_data.name self.check_parsed() return self._retrieved_data.compounddef.name
gpl-3.0
kursitet/edx-platform
lms/djangoapps/courseware/features/conditional.py
31
4647
# pylint: disable=missing-docstring from lettuce import world, steps from nose.tools import assert_in, assert_true from common import i_am_registered_for_the_course, visit_scenario_item from problems_setup import add_problem_to_course, answer_problem @steps class ConditionalSteps(object): COURSE_NUM = 'test_course' def setup_conditional(self, step, condition_type, condition, cond_value): r'that a course has a Conditional conditioned on (?P<condition_type>\w+) (?P<condition>\w+)=(?P<cond_value>\w+)$' i_am_registered_for_the_course(step, self.COURSE_NUM) world.scenario_dict['VERTICAL'] = world.ItemFactory( parent_location=world.scenario_dict['SECTION'].location, category='vertical', display_name="Test Vertical", ) world.scenario_dict['WRAPPER'] = world.ItemFactory( parent_location=world.scenario_dict['VERTICAL'].location, category='wrapper', display_name="Test Poll Wrapper" ) if condition_type == 'problem': world.scenario_dict['CONDITION_SOURCE'] = add_problem_to_course(self.COURSE_NUM, 'string') elif condition_type == 'poll': world.scenario_dict['CONDITION_SOURCE'] = world.ItemFactory( parent_location=world.scenario_dict['WRAPPER'].location, category='poll_question', display_name='Conditional Poll', data={ 'question': 'Is this a good poll?', 'answers': [ {'id': 'yes', 'text': 'Yes, of course'}, {'id': 'no', 'text': 'Of course not!'} ], } ) else: raise Exception("Unknown condition type: {!r}".format(condition_type)) metadata = { 'xml_attributes': { condition: cond_value } } world.scenario_dict['CONDITIONAL'] = world.ItemFactory( parent_location=world.scenario_dict['WRAPPER'].location, category='conditional', display_name="Test Conditional", metadata=metadata, sources_list=[world.scenario_dict['CONDITION_SOURCE'].location], ) world.ItemFactory( parent_location=world.scenario_dict['CONDITIONAL'].location, category='html', display_name='Conditional Contents', data='<html><div class="hidden-contents">Hidden Contents</p></html>' ) def setup_problem_attempts(self, step, 
not_attempted=None): r'that the conditioned problem has (?P<not_attempted>not )?been attempted$' visit_scenario_item('CONDITION_SOURCE') if not_attempted is None: answer_problem(self.COURSE_NUM, 'string', True) world.css_click("button.check") def when_i_view_the_conditional(self, step): r'I view the conditional$' visit_scenario_item('CONDITIONAL') world.wait_for_js_variable_truthy('$(".xblock-student_view[data-type=Conditional]").data("initialized")') def check_visibility(self, step, visible): r'the conditional contents are (?P<visible>\w+)$' world.wait_for_ajax_complete() assert_in(visible, ('visible', 'hidden')) if visible == 'visible': world.wait_for_visible('.hidden-contents') assert_true(world.css_visible('.hidden-contents')) else: assert_true(world.is_css_not_present('.hidden-contents')) assert_true( world.css_contains_text( '.conditional-message', 'must be attempted before this will become visible.' ) ) def answer_poll(self, step, answer): r' I answer the conditioned poll "([^"]*)"$' visit_scenario_item('CONDITION_SOURCE') world.wait_for_js_variable_truthy('$(".xblock-student_view[data-type=Poll]").data("initialized")') world.wait_for_ajax_complete() answer_text = [ poll_answer['text'] for poll_answer in world.scenario_dict['CONDITION_SOURCE'].answers if poll_answer['id'] == answer ][0] text_selector = '.poll_answer .text' poll_texts = world.retry_on_exception( lambda: [elem.text for elem in world.css_find(text_selector)] ) for idx, poll_text in enumerate(poll_texts): if poll_text == answer_text: world.css_click(text_selector, index=idx) return ConditionalSteps()
agpl-3.0
rahul67/hue
desktop/core/ext-py/PyYAML-3.09/lib/yaml/representer.py
114
17706
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', 'RepresenterError'] from error import * from nodes import * import datetime try: set except NameError: from sets import Set as set import sys, copy_reg, types class RepresenterError(YAMLError): pass class BaseRepresenter(object): yaml_representers = {} yaml_multi_representers = {} def __init__(self, default_style=None, default_flow_style=None): self.default_style = default_style self.default_flow_style = default_flow_style self.represented_objects = {} self.object_keeper = [] self.alias_key = None def represent(self, data): node = self.represent_data(data) self.serialize(node) self.represented_objects = {} self.object_keeper = [] self.alias_key = None def get_classobj_bases(self, cls): bases = [cls] for base in cls.__bases__: bases.extend(self.get_classobj_bases(base)) return bases def represent_data(self, data): if self.ignore_aliases(data): self.alias_key = None else: self.alias_key = id(data) if self.alias_key is not None: if self.alias_key in self.represented_objects: node = self.represented_objects[self.alias_key] #if node is None: # raise RepresenterError("recursive objects are not allowed: %r" % data) return node #self.represented_objects[alias_key] = None self.object_keeper.append(data) data_types = type(data).__mro__ if type(data) is types.InstanceType: data_types = self.get_classobj_bases(data.__class__)+list(data_types) if data_types[0] in self.yaml_representers: node = self.yaml_representers[data_types[0]](self, data) else: for data_type in data_types: if data_type in self.yaml_multi_representers: node = self.yaml_multi_representers[data_type](self, data) break else: if None in self.yaml_multi_representers: node = self.yaml_multi_representers[None](self, data) elif None in self.yaml_representers: node = self.yaml_representers[None](self, data) else: node = ScalarNode(None, unicode(data)) #if alias_key is not None: # self.represented_objects[alias_key] = node return node def 
add_representer(cls, data_type, representer): if not 'yaml_representers' in cls.__dict__: cls.yaml_representers = cls.yaml_representers.copy() cls.yaml_representers[data_type] = representer add_representer = classmethod(add_representer) def add_multi_representer(cls, data_type, representer): if not 'yaml_multi_representers' in cls.__dict__: cls.yaml_multi_representers = cls.yaml_multi_representers.copy() cls.yaml_multi_representers[data_type] = representer add_multi_representer = classmethod(add_multi_representer) def represent_scalar(self, tag, value, style=None): if style is None: style = self.default_style node = ScalarNode(tag, value, style=style) if self.alias_key is not None: self.represented_objects[self.alias_key] = node return node def represent_sequence(self, tag, sequence, flow_style=None): value = [] node = SequenceNode(tag, value, flow_style=flow_style) if self.alias_key is not None: self.represented_objects[self.alias_key] = node best_style = True for item in sequence: node_item = self.represent_data(item) if not (isinstance(node_item, ScalarNode) and not node_item.style): best_style = False value.append(node_item) if flow_style is None: if self.default_flow_style is not None: node.flow_style = self.default_flow_style else: node.flow_style = best_style return node def represent_mapping(self, tag, mapping, flow_style=None): value = [] node = MappingNode(tag, value, flow_style=flow_style) if self.alias_key is not None: self.represented_objects[self.alias_key] = node best_style = True if hasattr(mapping, 'items'): mapping = mapping.items() mapping.sort() for item_key, item_value in mapping: node_key = self.represent_data(item_key) node_value = self.represent_data(item_value) if not (isinstance(node_key, ScalarNode) and not node_key.style): best_style = False if not (isinstance(node_value, ScalarNode) and not node_value.style): best_style = False value.append((node_key, node_value)) if flow_style is None: if self.default_flow_style is not None: 
node.flow_style = self.default_flow_style else: node.flow_style = best_style return node def ignore_aliases(self, data): return False class SafeRepresenter(BaseRepresenter): def ignore_aliases(self, data): if data in [None, ()]: return True if isinstance(data, (str, unicode, bool, int, float)): return True def represent_none(self, data): return self.represent_scalar(u'tag:yaml.org,2002:null', u'null') def represent_str(self, data): tag = None style = None try: data = unicode(data, 'ascii') tag = u'tag:yaml.org,2002:str' except UnicodeDecodeError: try: data = unicode(data, 'utf-8') tag = u'tag:yaml.org,2002:str' except UnicodeDecodeError: data = data.encode('base64') tag = u'tag:yaml.org,2002:binary' style = '|' return self.represent_scalar(tag, data, style=style) def represent_unicode(self, data): return self.represent_scalar(u'tag:yaml.org,2002:str', data) def represent_bool(self, data): if data: value = u'true' else: value = u'false' return self.represent_scalar(u'tag:yaml.org,2002:bool', value) def represent_int(self, data): return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) def represent_long(self, data): return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) inf_value = 1e300 while repr(inf_value) != repr(inf_value*inf_value): inf_value *= inf_value def represent_float(self, data): if data != data or (data == 0.0 and data == 1.0): value = u'.nan' elif data == self.inf_value: value = u'.inf' elif data == -self.inf_value: value = u'-.inf' else: value = unicode(repr(data)).lower() # Note that in some cases `repr(data)` represents a float number # without the decimal parts. For instance: # >>> repr(1e17) # '1e17' # Unfortunately, this is not a valid float representation according # to the definition of the `!!float` tag. We fix this by adding # '.0' before the 'e' symbol. if u'.' 
not in value and u'e' in value: value = value.replace(u'e', u'.0e', 1) return self.represent_scalar(u'tag:yaml.org,2002:float', value) def represent_list(self, data): #pairs = (len(data) > 0 and isinstance(data, list)) #if pairs: # for item in data: # if not isinstance(item, tuple) or len(item) != 2: # pairs = False # break #if not pairs: return self.represent_sequence(u'tag:yaml.org,2002:seq', data) #value = [] #for item_key, item_value in data: # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', # [(item_key, item_value)])) #return SequenceNode(u'tag:yaml.org,2002:pairs', value) def represent_dict(self, data): return self.represent_mapping(u'tag:yaml.org,2002:map', data) def represent_set(self, data): value = {} for key in data: value[key] = None return self.represent_mapping(u'tag:yaml.org,2002:set', value) def represent_date(self, data): value = unicode(data.isoformat()) return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) def represent_datetime(self, data): value = unicode(data.isoformat(' ')) return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) def represent_yaml_object(self, tag, data, cls, flow_style=None): if hasattr(data, '__getstate__'): state = data.__getstate__() else: state = data.__dict__.copy() return self.represent_mapping(tag, state, flow_style=flow_style) def represent_undefined(self, data): raise RepresenterError("cannot represent an object: %s" % data) SafeRepresenter.add_representer(type(None), SafeRepresenter.represent_none) SafeRepresenter.add_representer(str, SafeRepresenter.represent_str) SafeRepresenter.add_representer(unicode, SafeRepresenter.represent_unicode) SafeRepresenter.add_representer(bool, SafeRepresenter.represent_bool) SafeRepresenter.add_representer(int, SafeRepresenter.represent_int) SafeRepresenter.add_representer(long, SafeRepresenter.represent_long) SafeRepresenter.add_representer(float, SafeRepresenter.represent_float) SafeRepresenter.add_representer(list, 
SafeRepresenter.represent_list) SafeRepresenter.add_representer(tuple, SafeRepresenter.represent_list) SafeRepresenter.add_representer(dict, SafeRepresenter.represent_dict) SafeRepresenter.add_representer(set, SafeRepresenter.represent_set) SafeRepresenter.add_representer(datetime.date, SafeRepresenter.represent_date) SafeRepresenter.add_representer(datetime.datetime, SafeRepresenter.represent_datetime) SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined) class Representer(SafeRepresenter): def represent_str(self, data): tag = None style = None try: data = unicode(data, 'ascii') tag = u'tag:yaml.org,2002:str' except UnicodeDecodeError: try: data = unicode(data, 'utf-8') tag = u'tag:yaml.org,2002:python/str' except UnicodeDecodeError: data = data.encode('base64') tag = u'tag:yaml.org,2002:binary' style = '|' return self.represent_scalar(tag, data, style=style) def represent_unicode(self, data): tag = None try: data.encode('ascii') tag = u'tag:yaml.org,2002:python/unicode' except UnicodeEncodeError: tag = u'tag:yaml.org,2002:str' return self.represent_scalar(tag, data) def represent_long(self, data): tag = u'tag:yaml.org,2002:int' if int(data) is not data: tag = u'tag:yaml.org,2002:python/long' return self.represent_scalar(tag, unicode(data)) def represent_complex(self, data): if data.imag == 0.0: data = u'%r' % data.real elif data.real == 0.0: data = u'%rj' % data.imag elif data.imag > 0: data = u'%r+%rj' % (data.real, data.imag) else: data = u'%r%rj' % (data.real, data.imag) return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) def represent_tuple(self, data): return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) def represent_name(self, data): name = u'%s.%s' % (data.__module__, data.__name__) return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'') def represent_module(self, data): return self.represent_scalar( u'tag:yaml.org,2002:python/module:'+data.__name__, u'') def 
represent_instance(self, data): # For instances of classic classes, we use __getinitargs__ and # __getstate__ to serialize the data. # If data.__getinitargs__ exists, the object must be reconstructed by # calling cls(**args), where args is a tuple returned by # __getinitargs__. Otherwise, the cls.__init__ method should never be # called and the class instance is created by instantiating a trivial # class and assigning to the instance's __class__ variable. # If data.__getstate__ exists, it returns the state of the object. # Otherwise, the state of the object is data.__dict__. # We produce either a !!python/object or !!python/object/new node. # If data.__getinitargs__ does not exist and state is a dictionary, we # produce a !!python/object node . Otherwise we produce a # !!python/object/new node. cls = data.__class__ class_name = u'%s.%s' % (cls.__module__, cls.__name__) args = None state = None if hasattr(data, '__getinitargs__'): args = list(data.__getinitargs__()) if hasattr(data, '__getstate__'): state = data.__getstate__() else: state = data.__dict__ if args is None and isinstance(state, dict): return self.represent_mapping( u'tag:yaml.org,2002:python/object:'+class_name, state) if isinstance(state, dict) and not state: return self.represent_sequence( u'tag:yaml.org,2002:python/object/new:'+class_name, args) value = {} if args: value['args'] = args value['state'] = state return self.represent_mapping( u'tag:yaml.org,2002:python/object/new:'+class_name, value) def represent_object(self, data): # We use __reduce__ API to save the data. data.__reduce__ returns # a tuple of length 2-5: # (function, args, state, listitems, dictitems) # For reconstructing, we calls function(*args), then set its state, # listitems, and dictitems if they are not None. # A special case is when function.__name__ == '__newobj__'. In this # case we create the object with args[0].__new__(*args). # Another special case is when __reduce__ returns a string - we don't # support it. 
# We produce a !!python/object, !!python/object/new or # !!python/object/apply node. cls = type(data) if cls in copy_reg.dispatch_table: reduce = copy_reg.dispatch_table[cls](data) elif hasattr(data, '__reduce_ex__'): reduce = data.__reduce_ex__(2) elif hasattr(data, '__reduce__'): reduce = data.__reduce__() else: raise RepresenterError("cannot represent object: %r" % data) reduce = (list(reduce)+[None]*5)[:5] function, args, state, listitems, dictitems = reduce args = list(args) if state is None: state = {} if listitems is not None: listitems = list(listitems) if dictitems is not None: dictitems = dict(dictitems) if function.__name__ == '__newobj__': function = args[0] args = args[1:] tag = u'tag:yaml.org,2002:python/object/new:' newobj = True else: tag = u'tag:yaml.org,2002:python/object/apply:' newobj = False function_name = u'%s.%s' % (function.__module__, function.__name__) if not args and not listitems and not dictitems \ and isinstance(state, dict) and newobj: return self.represent_mapping( u'tag:yaml.org,2002:python/object:'+function_name, state) if not listitems and not dictitems \ and isinstance(state, dict) and not state: return self.represent_sequence(tag+function_name, args) value = {} if args: value['args'] = args if state or not isinstance(state, dict): value['state'] = state if listitems: value['listitems'] = listitems if dictitems: value['dictitems'] = dictitems return self.represent_mapping(tag+function_name, value) Representer.add_representer(str, Representer.represent_str) Representer.add_representer(unicode, Representer.represent_unicode) Representer.add_representer(long, Representer.represent_long) Representer.add_representer(complex, Representer.represent_complex) Representer.add_representer(tuple, Representer.represent_tuple) Representer.add_representer(type, Representer.represent_name) Representer.add_representer(types.ClassType, Representer.represent_name) Representer.add_representer(types.FunctionType, Representer.represent_name) 
Representer.add_representer(types.BuiltinFunctionType, Representer.represent_name) Representer.add_representer(types.ModuleType, Representer.represent_module) Representer.add_multi_representer(types.InstanceType, Representer.represent_instance) Representer.add_multi_representer(object, Representer.represent_object)
apache-2.0
Conflei/ATI
[ATI] Misfenterest/Frontend/venv/lib/python2.6/site-packages/pip/_vendor/requests/packages/urllib3/poolmanager.py
678
9406
import logging

try:  # Python 3
    from urllib.parse import urljoin
except ImportError:
    from urlparse import urljoin

from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .exceptions import LocationValueError, MaxRetryError
from .request import RequestMethods
from .util.url import parse_url
from .util.retry import Retry


__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']


pool_classes_by_scheme = {
    'http': HTTPConnectionPool,
    'https': HTTPSConnectionPool,
}

log = logging.getLogger(__name__)

# Pool keyword arguments that only make sense for HTTPS; they are stripped
# before constructing a plain HTTP pool in :meth:`PoolManager._new_pool`.
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
                'ssl_version')


class PoolManager(RequestMethods):
    """
    Allows for arbitrary requests while transparently keeping track of
    necessary connection pools for you.

    :param num_pools:
        Number of connection pools to cache before discarding the least
        recently used pool.

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.

    :param \**connection_pool_kw:
        Additional parameters are used to create fresh
        :class:`urllib3.connectionpool.ConnectionPool` instances.

    Example::

        >>> manager = PoolManager(num_pools=2)
        >>> r = manager.request('GET', 'http://google.com/')
        >>> r = manager.request('GET', 'http://google.com/mail')
        >>> r = manager.request('GET', 'http://yahoo.com/')
        >>> len(manager.pools)
        2

    """

    # Overridden by ProxyManager; ``None`` means direct connections.
    proxy = None

    def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
        RequestMethods.__init__(self, headers)
        self.connection_pool_kw = connection_pool_kw
        # LRU cache of pools keyed by (scheme, host, port); evicted pools
        # are closed by the dispose_func.
        self.pools = RecentlyUsedContainer(num_pools,
                                           dispose_func=lambda p: p.close())

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.clear()
        # Return False to re-raise any potential exceptions
        return False

    def _new_pool(self, scheme, host, port):
        """
        Create a new :class:`ConnectionPool` based on host, port and scheme.

        This method is used to actually create the connection pools handed out
        by :meth:`connection_from_url` and companion methods. It is intended
        to be overridden for customization.
        """
        pool_cls = pool_classes_by_scheme[scheme]
        kwargs = self.connection_pool_kw
        if scheme == 'http':
            # SSL-only keywords would be rejected by HTTPConnectionPool;
            # copy before mutating so the stored kwargs stay intact.
            kwargs = self.connection_pool_kw.copy()
            for kw in SSL_KEYWORDS:
                kwargs.pop(kw, None)

        return pool_cls(host, port, **kwargs)

    def clear(self):
        """
        Empty our store of pools and direct them all to close.

        This will not affect in-flight connections, but they will not be
        re-used after completion.
        """
        self.pools.clear()

    def connection_from_host(self, host, port=None, scheme='http'):
        """
        Get a :class:`ConnectionPool` based on the host, port, and scheme.

        If ``port`` isn't given, it will be derived from the ``scheme`` using
        ``urllib3.connectionpool.port_by_scheme``.
        """
        if not host:
            raise LocationValueError("No host specified.")

        scheme = scheme or 'http'
        port = port or port_by_scheme.get(scheme, 80)
        pool_key = (scheme, host, port)

        with self.pools.lock:
            # If the scheme, host, or port doesn't match existing open
            # connections, open a new ConnectionPool.
            pool = self.pools.get(pool_key)
            if pool:
                return pool

            # Make a fresh ConnectionPool of the desired type
            pool = self._new_pool(scheme, host, port)
            self.pools[pool_key] = pool

        return pool

    def connection_from_url(self, url):
        """
        Similar to :func:`urllib3.connectionpool.connection_from_url` but
        doesn't pass any additional parameters to the
        :class:`urllib3.connectionpool.ConnectionPool` constructor.

        Additional parameters are taken from the :class:`.PoolManager`
        constructor.
        """
        u = parse_url(url)
        return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)

    def urlopen(self, method, url, redirect=True, **kw):
        """
        Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
        with custom cross-host redirect logic and only sends the request-uri
        portion of the ``url``.

        The given ``url`` parameter must be absolute, such that an appropriate
        :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
        """
        u = parse_url(url)
        conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)

        kw['assert_same_host'] = False
        kw['redirect'] = False
        if 'headers' not in kw:
            kw['headers'] = self.headers

        if self.proxy is not None and u.scheme == "http":
            # For a plain-HTTP proxy the full URL goes on the request line.
            response = conn.urlopen(method, url, **kw)
        else:
            response = conn.urlopen(method, u.request_uri, **kw)

        redirect_location = redirect and response.get_redirect_location()
        if not redirect_location:
            return response

        # Support relative URLs for redirecting.
        redirect_location = urljoin(url, redirect_location)

        # RFC 7231, Section 6.4.4
        if response.status == 303:
            method = 'GET'

        retries = kw.get('retries')
        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect)

        try:
            retries = retries.increment(method, url, response=response,
                                        _pool=conn)
        except MaxRetryError:
            if retries.raise_on_redirect:
                raise
            return response

        kw['retries'] = retries
        kw['redirect'] = redirect

        # Fix: pass lazy %-style arguments to the logger instead of eagerly
        # formatting the string, so the work is only done when INFO is enabled.
        log.info("Redirecting %s -> %s", url, redirect_location)
        return self.urlopen(method, redirect_location, **kw)


class ProxyManager(PoolManager):
    """
    Behaves just like :class:`PoolManager`, but sends all requests through
    the defined proxy, using the CONNECT method for HTTPS URLs.

    :param proxy_url:
        The URL of the proxy to be used.

    :param proxy_headers:
        A dictionary contaning headers that will be sent to the proxy. In case
        of HTTP they are being sent with each request, while in the
        HTTPS/CONNECT case they are sent only once. Could be used for proxy
        authentication.

    Example:
        >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
        >>> r1 = proxy.request('GET', 'http://google.com/')
        >>> r2 = proxy.request('GET', 'http://httpbin.org/')
        >>> len(proxy.pools)
        1
        >>> r3 = proxy.request('GET', 'https://httpbin.org/')
        >>> r4 = proxy.request('GET', 'https://twitter.com/')
        >>> len(proxy.pools)
        3

    """

    def __init__(self, proxy_url, num_pools=10, headers=None,
                 proxy_headers=None, **connection_pool_kw):

        if isinstance(proxy_url, HTTPConnectionPool):
            proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
                                        proxy_url.port)
        proxy = parse_url(proxy_url)
        if not proxy.port:
            port = port_by_scheme.get(proxy.scheme, 80)
            proxy = proxy._replace(port=port)

        assert proxy.scheme in ("http", "https"), \
            'Not supported proxy scheme %s' % proxy.scheme

        self.proxy = proxy
        self.proxy_headers = proxy_headers or {}

        connection_pool_kw['_proxy'] = self.proxy
        connection_pool_kw['_proxy_headers'] = self.proxy_headers

        super(ProxyManager, self).__init__(
            num_pools, headers, **connection_pool_kw)

    def connection_from_host(self, host, port=None, scheme='http'):
        if scheme == "https":
            # HTTPS goes through CONNECT, so pool per destination host.
            return super(ProxyManager, self).connection_from_host(
                host, port, scheme)

        # Plain HTTP is always sent to the proxy itself.
        return super(ProxyManager, self).connection_from_host(
            self.proxy.host, self.proxy.port, self.proxy.scheme)

    def _set_proxy_headers(self, url, headers=None):
        """
        Sets headers needed by proxies: specifically, the Accept and Host
        headers. Only sets headers not provided by the user.
        """
        headers_ = {'Accept': '*/*'}

        netloc = parse_url(url).netloc
        if netloc:
            headers_['Host'] = netloc

        if headers:
            headers_.update(headers)
        return headers_

    def urlopen(self, method, url, redirect=True, **kw):
        "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
        u = parse_url(url)

        if u.scheme == "http":
            # For proxied HTTPS requests, httplib sets the necessary headers
            # on the CONNECT to the proxy. For HTTP, we'll definitely
            # need to set 'Host' at the very least.
            headers = kw.get('headers', self.headers)
            kw['headers'] = self._set_proxy_headers(url, headers)

        return super(ProxyManager, self).urlopen(method, url,
                                                 redirect=redirect, **kw)


def proxy_from_url(url, **kw):
    """Construct a :class:`ProxyManager` from a proxy URL."""
    return ProxyManager(proxy_url=url, **kw)
mit
DanteOnline/free-art
venv/lib/python3.4/site-packages/django/core/management/sql.py
399
1890
from __future__ import unicode_literals

from django.apps import apps
from django.db import models


def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False):
    """
    Returns a list of the SQL statements used to flush the database.

    If only_django is True, then only table names that have associated Django
    models and are in INSTALLED_APPS will be included.
    """
    if only_django:
        tables = connection.introspection.django_table_names(only_existing=True, include_views=False)
    else:
        tables = connection.introspection.table_names(include_views=False)
    # Sequences are only reset when requested; an empty tuple means "leave
    # sequences alone".
    seqs = connection.introspection.sequence_list() if reset_sequences else ()
    statements = connection.ops.sql_flush(style, tables, seqs, allow_cascade)
    return statements


def _emit_migrate_signal(signal, stage, verbosity, interactive, db):
    """
    Shared implementation for the pre/post migrate signal emitters.

    ``signal`` is the Django signal to send; ``stage`` is the human-readable
    label ("pre-migrate" / "post-migrate") used in the verbose message.
    """
    for app_config in apps.get_app_configs():
        # Apps without a models module have nothing to migrate.
        if app_config.models_module is None:
            continue
        if verbosity >= 2:
            print("Running %s handlers for application %s" % (stage, app_config.label))
        signal.send(
            sender=app_config,
            app_config=app_config,
            verbosity=verbosity,
            interactive=interactive,
            using=db)


def emit_pre_migrate_signal(verbosity, interactive, db):
    """Emit the pre_migrate signal for every installed application."""
    _emit_migrate_signal(models.signals.pre_migrate, "pre-migrate",
                         verbosity, interactive, db)


def emit_post_migrate_signal(verbosity, interactive, db):
    """Emit the post_migrate signal for every installed application."""
    _emit_migrate_signal(models.signals.post_migrate, "post-migrate",
                         verbosity, interactive, db)
gpl-3.0
bpsinc-native/src_third_party_scons-2.0.1
engine/SCons/Tool/linkloc.py
61
4006
"""SCons.Tool.linkloc Tool specification for the LinkLoc linker for the Phar Lap ETS embedded operating system. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/linkloc.py 5134 2010/08/16 23:02:40 bdeegan" import os.path import re import SCons.Action import SCons.Defaults import SCons.Errors import SCons.Tool import SCons.Util from SCons.Tool.MSCommon import msvs_exists, merge_default_version from SCons.Tool.PharLapCommon import addPharLapPaths _re_linker_command = re.compile(r'(\s)@\s*([^\s]+)') def repl_linker_command(m): # Replaces any linker command file directives (e.g. "@foo.lnk") with # the actual contents of the file. 
try: f=open(m.group(2), "r") return m.group(1) + f.read() except IOError: # the linker should return an error if it can't # find the linker command file so we will remain quiet. # However, we will replace the @ with a # so we will not continue # to find it with recursive substitution return m.group(1) + '#' + m.group(2) class LinklocGenerator(object): def __init__(self, cmdline): self.cmdline = cmdline def __call__(self, env, target, source, for_signature): if for_signature: # Expand the contents of any linker command files recursively subs = 1 strsub = env.subst(self.cmdline, target=target, source=source) while subs: strsub, subs = _re_linker_command.subn(repl_linker_command, strsub) return strsub else: return "${TEMPFILE('" + self.cmdline + "')}" def generate(env): """Add Builders and construction variables for ar to an Environment.""" SCons.Tool.createSharedLibBuilder(env) SCons.Tool.createProgBuilder(env) env['SUBST_CMD_FILE'] = LinklocGenerator env['SHLINK'] = '$LINK' env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS') env['SHLINKCOM'] = '${SUBST_CMD_FILE("$SHLINK $SHLINKFLAGS $_LIBDIRFLAGS $_LIBFLAGS -dll $TARGET $SOURCES")}' env['SHLIBEMITTER']= None env['LINK'] = "linkloc" env['LINKFLAGS'] = SCons.Util.CLVar('') env['LINKCOM'] = '${SUBST_CMD_FILE("$LINK $LINKFLAGS $_LIBDIRFLAGS $_LIBFLAGS -exe $TARGET $SOURCES")}' env['LIBDIRPREFIX']='-libpath ' env['LIBDIRSUFFIX']='' env['LIBLINKPREFIX']='-lib ' env['LIBLINKSUFFIX']='$LIBSUFFIX' # Set-up ms tools paths for default version merge_default_version(env) addPharLapPaths(env) def exists(env): if msvs_exists(): return env.Detect('linkloc') else: return 0 # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
mit
hengyicai/OnlineAggregationUCAS
python/pyspark/streaming/util.py
3
4345
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import time
from datetime import datetime
import traceback

from pyspark import SparkContext, RDD


class TransformFunction(object):
    """
    This class wraps a function RDD[X] -> RDD[Y] that was passed to
    DStream.transform(), allowing it to be called from Java via Py4J's
    callback server.

    Java calls this function with a sequence of JavaRDDs and this function
    returns a single JavaRDD pointer back to Java.
    """
    # Class-level placeholder; not assigned anywhere in this module.
    _emptyRDD = None

    def __init__(self, ctx, func, *deserializers):
        # ctx may be None here; it is resolved lazily in call() from the
        # active SparkContext.
        self.ctx = ctx
        self.func = func
        # One deserializer per incoming JavaRDD; extended in call() if Java
        # passes more RDDs than deserializers were supplied.
        self.deserializers = deserializers

    def call(self, milliseconds, jrdds):
        # Invoked from Java through Py4J.  Exceptions must never propagate
        # back into the callback server, hence the broad except below which
        # prints the traceback and returns None.
        try:
            if self.ctx is None:
                self.ctx = SparkContext._active_spark_context
            if not self.ctx or not self.ctx._jsc:
                # stopped
                return

            # extend deserializers with the first one
            sers = self.deserializers
            if len(sers) < len(jrdds):
                sers += (sers[0],) * (len(jrdds) - len(sers))

            # Java may pass null RDDs; keep them as None placeholders.
            rdds = [RDD(jrdd, self.ctx, ser) if jrdd else None
                    for jrdd, ser in zip(jrdds, sers)]
            # Java supplies the batch time in milliseconds since the epoch.
            t = datetime.fromtimestamp(milliseconds / 1000.0)
            r = self.func(t, *rdds)
            if r:
                # Hand the underlying JavaRDD pointer back to Java.
                return r._jrdd
        except Exception:
            traceback.print_exc()

    def __repr__(self):
        return "TransformFunction(%s)" % self.func

    class Java:
        # Py4J marker: the Java-side interface this callback implements.
        implements = ['org.apache.spark.streaming.api.python.PythonTransformFunction']


class TransformFunctionSerializer(object):
    """
    This class implements a serializer for PythonTransformFunction Java
    objects.

    This is necessary because the Java PythonTransformFunction objects are
    actually Py4J references to Python objects and thus are not directly
    serializable. When Java needs to serialize a PythonTransformFunction,
    it uses this class to invoke Python, which returns the serialized function
    as a byte array.
    """
    def __init__(self, ctx, serializer, gateway=None):
        self.ctx = ctx
        self.serializer = serializer
        self.gateway = gateway or self.ctx._gateway
        # Register this Python object with the JVM so Java can call back
        # into dumps()/loads() below.
        self.gateway.jvm.PythonDStream.registerSerializer(self)

    def dumps(self, id):
        # Called from Java with the Py4J object id of a TransformFunction.
        # Like TransformFunction.call, errors are printed, not raised.
        try:
            # Look the live Python object up in the Py4J object pool.
            func = self.gateway.gateway_property.pool[id]
            return bytearray(self.serializer.dumps((func.func, func.deserializers)))
        except Exception:
            traceback.print_exc()

    def loads(self, bytes):
        # Inverse of dumps(): rebuild a TransformFunction from the byte
        # array Java hands back.
        try:
            f, deserializers = self.serializer.loads(str(bytes))
            return TransformFunction(self.ctx, f, *deserializers)
        except Exception:
            traceback.print_exc()

    def __repr__(self):
        return "TransformFunctionSerializer(%s)" % self.serializer

    class Java:
        # Py4J marker: the Java-side interface this callback implements.
        implements = ['org.apache.spark.streaming.api.python.PythonTransformFunctionSerializer']


def rddToFileName(prefix, suffix, timestamp):
    """
    Return string prefix-time(.suffix)

    >>> rddToFileName("spark", None, 12345678910)
    'spark-12345678910'
    >>> rddToFileName("spark", "tmp", 12345678910)
    'spark-12345678910.tmp'
    """
    if isinstance(timestamp, datetime):
        # Convert a datetime to epoch milliseconds.
        # NOTE(review): ``long`` is a Python 2 builtin; this branch would
        # raise NameError on Python 3 — confirm this module is py2-only.
        seconds = time.mktime(timestamp.timetuple())
        timestamp = long(seconds * 1000) + timestamp.microsecond / 1000
    if suffix is None:
        return prefix + "-" + str(timestamp)
    else:
        return prefix + "-" + str(timestamp) + "." + suffix


if __name__ == "__main__":
    import doctest
    doctest.testmod()
apache-2.0
Intel-tensorflow/tensorflow
tensorflow/python/kernel_tests/relu_op_test.py
9
22679
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Relu and ReluGrad.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import backprop from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import gradient_checker_v2 from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variables import tensorflow.python.ops.nn_grad # pylint: disable=unused-import from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent def _elu_grad_grad(activation): if activation < 0: return np.exp(activation) return 0 class ReluTest(test.TestCase): def _npRelu(self, np_features): return np.maximum(np_features, np.zeros(np_features.shape)) def testNpRelu(self): self.assertAllClose( np.array([[0.0, 0.7, 0.0, 0.3, 0.0], [0.1, 0.0, 0.5, 0.0, 0.9]]), self._npRelu( np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7, 0.9]]))) def _testRelu(self, np_features): np_relu = self._npRelu(np_features) tf_relu = nn_ops.relu(np_features) 
self.assertAllClose(np_relu, tf_relu) self.assertShapeEqual(np_relu, tf_relu) def testNumbersCPU(self): for t in [np.int32, np.int64, np.float16, np.float32, np.float64]: # Force execution on CPU even if a GPU kernel is available for the type. with ops.device("/device:CPU:0"): self._testRelu( np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t)) def testNumbersGPU(self): if not test.is_gpu_available(): self.skipTest("No GPU available") for t in [np.float16, np.float32, np.float64]: self._testRelu( np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t)) def testReluInt8x4GoodShape(self): if not test.is_gpu_available(cuda_only=True): self.skipTest("No GPU available") inputs = np.array([[-50, 7, 23, 0], [-1, -5, 6, 11]]) np_relu = self._npRelu(inputs) tf_relu = nn_ops.relu(constant_op.constant(inputs, dtypes.qint8)) self.assertAllClose(np_relu, tf_relu) self.assertShapeEqual(np_relu, tf_relu) @test_util.disable_xla("b/123338077") # Passes with XLA def testReluInt8x4BadShape(self): if not test.is_gpu_available(cuda_only=True): self.skipTest("No GPU available") inputs = constant_op.constant( np.array([[-50, 7, 23], [0, 1, -5], [6, -2, 11]]), dtypes.qint8) with self.assertRaisesRegex( errors.InvalidArgumentError, "Tensor size must be a multiple of 4 for Relu<qint8>. Got 9"): self.evaluate(nn_ops.relu(inputs)) inputs = constant_op.constant( np.array([1, -2, 3, -4, 5, -6, 7, -8, 9, -8, 7, -6, 5, -4, 3, -2, 1]), dtypes.qint8) with self.assertRaisesRegex( errors.InvalidArgumentError, "Tensor size must be a multiple of 4 for Relu<qint8>. 
Got 17"): self.evaluate(nn_ops.relu(inputs)) def testNoElement(self): self._testRelu(np.array([[], []], dtype=np.float32)) @test_util.disable_xla("b/157978028: Does not yet pass with XLA") def testNaNPropagation(self): for t in [np.float16, np.float32, np.float64]: self._testRelu(np.array([-1, np.nan, 1, np.nan]).astype(t)) # The gradient test for ReLU is a bit tricky as the derivative is not well # defined at around zero and we want to avoid that in terms of input values. def testGradientFloat32(self): with self.cached_session(): x = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=np.float32, order="F") err = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient( nn_ops.relu, [x], delta=1.0 / 1024)) self.assertLess(err, 1e-6) # The gradient test for ReLU is a bit tricky as the derivative is not well # defined at around zero and we want to avoid that in terms of input values. def testGradientFloat16(self): with self.cached_session(): x = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=np.float16, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(nn_ops.relu, [x])) self.assertLess(err, 1e-6) def testGradientFloat64(self): with self.cached_session(): x = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=np.float64, order="F") err = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient( nn_ops.relu, [x], delta=1.0 / 1024)) self.assertLess(err, 1e-15) def testGradGradFloat32(self): with self.cached_session(): def f(x): assert x.dtype == dtypes.float32 with backprop.GradientTape() as tape: tape.watch(x) y = nn_ops.relu(x) return tape.gradient(y, x) x = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=np.float32, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(f, [x], delta=1.0 / 1024)) self.assertLess(err, 1e-4) def testGradGradFloat64(self): with 
self.cached_session(): def f(x): assert x.dtype == dtypes.float64 with backprop.GradientTape() as tape: tape.watch(x) y = nn_ops.relu(x) return tape.gradient(y, x) x = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=np.float64, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(f, [x], delta=1.0 / 1024)) self.assertLess(err, 1e-10) def testGradientScalar(self): x = variables.Variable(100.) def loss(): return nn_ops.relu(x)**2 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.25) self.evaluate(variables.global_variables_initializer()) self.evaluate(optimizer.minimize(loss)) self.assertAllClose(x.read_value(), 50.0) def testGradientNoElement(self): with self.cached_session(): def f(x): with backprop.GradientTape() as tape: tape.watch(x) y = nn_ops.relu(x) return tape.gradient(y, x) x = np.asarray([[], []], dtype=np.float32) z = list(gradient_checker_v2.compute_gradient(f, [x]))[0][0] self.assertAllEqual(z, np.reshape(x, (0, 0))) class Relu6Test(test.TestCase): def _npRelu6(self, np_features): sixes = np.copy(np_features) sixes.fill(6.0) return np.minimum( np.maximum(np_features, np.zeros(np_features.shape)), sixes) def testNpRelu6(self): self.assertAllClose( np.array([[0.0, 0.7, 0.0, 0.3, 6.0], [0.1, 0.0, 6.0, 0.0, 0.9]]), self._npRelu6( np.array([[-0.9, 0.7, -0.5, 0.3, 6.0], [0.1, -0.3, 6.5, -0.7, 0.9]]))) def _testRelu6(self, np_features): np_relu6 = self._npRelu6(np_features) tf_relu6 = nn_ops.relu6(np_features) self.assertAllClose(np_relu6, tf_relu6) self.assertShapeEqual(np_relu6, tf_relu6) def testNumbersCPU(self): for t in [np.int32, np.int64, np.float16, np.float32, np.float64]: # Force execution on CPU even if a GPU kernel is available for the type. 
with ops.device("/device:CPU:0"): self._testRelu6( np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t)) def testNumbersGPU(self): if not test.is_gpu_available(): self.skipTest("No GPU available") for t in [np.float16, np.float, np.double]: self._testRelu6( np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t)) @test_util.disable_xla("b/157978028: Does not yet pass with XLA") def testNaNPropagation(self): for t in [np.float16, np.float32, np.float64]: self._testRelu6(np.array([-1, np.nan, 1, 7, np.nan]).astype(t)) # The gradient test for ReLU6 is a bit tricky as the derivative is # not well defined at around zero and six and we want to avoid that # in terms of input values. def testGradientFloat32(self): with self.cached_session(): x = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]], dtype=np.float32, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(nn_ops.relu6, [x])) self.assertLess(err, 1e-4) def testGradientFloat64(self): with self.cached_session(): x = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]], dtype=np.float64, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(nn_ops.relu6, [x])) self.assertLess(err, 1e-10) class LeakyReluTest(test.TestCase): def _npLeakyRelu(self, np_features, alpha=0.1): return np.maximum(np_features, alpha * np_features) def testNpLeakyRelu(self): self.assertAllClose( np.array([[-0.09, 0.7, -0.05, 0.3, -0.01], [0.1, -0.03, 0.5, -0.07, 0.9]]), self._npLeakyRelu( np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7, 0.9]]), alpha=0.1)) def _testLeakyRelu(self, np_features, alpha): np_leaky_relu = self._npLeakyRelu(np_features, alpha) tf_leaky_relu = nn_ops.leaky_relu(np_features, alpha) self.assertAllClose(np_leaky_relu, tf_leaky_relu) self.assertShapeEqual(np_leaky_relu, tf_leaky_relu) def testNumbersCPU(self): for t in [np.int32, np.int64, np.float16, np.float32, np.float64]: # Force execution 
on CPU even if a GPU kernel is available for the type. with ops.device("/device:CPU:0"): self._testLeakyRelu( np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t), alpha=0.2) def testNumbersGPU(self): if not test.is_gpu_available(): self.skipTest("No GPU available") for t in [np.float16, np.float32, np.float64]: self._testLeakyRelu( np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t), alpha=0.1) def testNaNPropagation(self): for t in [np.float16, np.float32, np.float64]: self._testLeakyRelu(np.array([-1, np.nan, 1, np.nan]).astype(t), alpha=0.2) # The gradient test for Leaky ReLU is a bit tricky as the derivative is not # well defined at around zero and we want to avoid that in terms of input # values. def testGradientFloat32(self): with self.cached_session(): x = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=np.float32, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(nn_ops.leaky_relu, [x])) self.assertLess(err, 1e-4) def testGradientFloat64(self): with self.cached_session(): x = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=np.float64, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(nn_ops.leaky_relu, [x])) self.assertLess(err, 1e-10) def testGradGradFloat32(self): with self.cached_session(): def f(x): assert x.dtype == dtypes.float32 with backprop.GradientTape() as tape: tape.watch(x) y = nn_ops.leaky_relu(x) return tape.gradient(y, x) x = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=np.float32, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(f, [x])) self.assertLess(err, 1e-4) def testGradGradFloat64(self): with self.cached_session(): def f(x): assert x.dtype == dtypes.float64 with backprop.GradientTape() as tape: tape.watch(x) y = nn_ops.leaky_relu(x) return tape.gradient(y, x) x = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 
0.3, 0.5, 0.7, 0.9]], dtype=np.float64, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(f, [x])) self.assertLess(err, 1e-10) def testGradientScalar(self): x = variables.Variable(-100.) def loss(): return nn_ops.leaky_relu(x, 0.05)**2 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.2) self.evaluate(variables.global_variables_initializer()) self.evaluate(optimizer.minimize(loss)) self.assertAllClose(x.read_value(), -99.9) def testUnexpectedAlphaValue(self): self.assertAllClose( np.array([[-9.0, 0.7, -5.0, 0.3, -0.1], [0.1, -3.0, 0.5, -27.0, 0.9]]), nn_ops.leaky_relu( np.array([[-0.9, 0.7, -0.5, 0.3, -0.01], [0.1, -0.3, 0.5, -2.7, 0.9]]), alpha=10)) self.assertAllClose( np.array([[9.0, 0.7, 5.0, 0.3, 0.1], [0.1, 3.0, 0.5, 27.0, 0.9]]), nn_ops.leaky_relu( np.array([[-0.9, 0.7, -0.5, 0.3, -0.01], [0.1, -0.3, 0.5, -2.7, 0.9]]), alpha=-10)) class EluTest(test.TestCase): def _npElu(self, np_features): return np.where(np_features < 0, np.exp(np_features) - 1, np_features) def testNpElu(self): self.assertAllClose( np.array([[-0.59343034025, 0.7, -0.39346934028, 0.3, -0.09516258196], [0.1, -0.25918177931, 0.5, -0.5034146962, 0.9]]), self._npElu( np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7, 0.9]]))) def _testElu(self, np_features): np_elu = self._npElu(np_features) tf_elu = nn_ops.elu(np_features) self.assertAllCloseAccordingToType(np_elu, tf_elu) self.assertShapeEqual(np_elu, tf_elu) def testNumbersCPU(self): for t in [np.float16, np.float32, np.float64]: # Force execution on CPU even if a GPU kernel is available for the type. 
with ops.device("/device:CPU:0"): self._testElu( np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t)) def testNumbersGPU(self): if not test.is_gpu_available(): self.skipTest("No GPU available") for t in [np.float16, np.float32, np.float64]: self._testElu(np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t)) def testNaNPropagation(self): for t in [np.float16, np.float32, np.float64]: self._testElu(np.array([-1, np.nan, 1, np.nan]).astype(t)) def testGradientFloat32(self): with self.cached_session(): x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]] x = np.asarray(x_val, dtype=np.float32, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(nn_ops.elu, [x])) self.assertLess(err, 1e-4) def testGradientFloat64(self): with self.cached_session(): x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]] x = np.asarray(x_val, dtype=np.float64, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(nn_ops.elu, [x])) self.assertLess(err, 1e-6) def testGradGrad(self): with self.cached_session(): def f(x): with backprop.GradientTape(persistent=True) as tape: tape.watch(x) y = nn_ops.elu(x) dy = tape.gradient(y, x) return tape.gradient(dy, x) for x in [-1., -0.5, 0.5, 1.]: got = self.evaluate(f(constant_op.constant(x))) want = _elu_grad_grad(x) err = np.abs(got - want) self.assertLess(err, 1e-4) def testGradGradFloat32(self): with self.cached_session(): def f(x): assert x.dtype == dtypes.float32 with backprop.GradientTape() as tape: tape.watch(x) y = nn_ops.elu(x) return tape.gradient(y, x) x = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=np.float32, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(f, [x])) self.assertLess(err, 1e-4) def testGradGradFloat64(self): with self.cached_session(): def f(x): assert x.dtype == dtypes.float64 with backprop.GradientTape() as tape: tape.watch(x) y = nn_ops.elu(x) 
return tape.gradient(y, x) x = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=np.float64, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(f, [x])) self.assertLess(err, 1e-6) class SeluTest(test.TestCase): def _npSelu(self, np_features): scale = 1.0507009873554804934193349852946 scale_alpha = 1.7580993408473768599402175208123 return np.where(np_features < 0, scale_alpha * (np.exp(np_features) - 1), scale * np_features) def testNpSelu(self): self.assertAllClose( np.array([[-1.0433095, 0.73549069, -0.6917582, 0.3152103, -0.16730527], [0.1050701, -0.45566732, 0.5253505, -0.88505305, 0.9456309]]), self._npSelu( np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7, 0.9]]))) def _testSelu(self, np_features): np_selu = self._npSelu(np_features) tf_selu = nn_ops.selu(np_features) self.assertAllCloseAccordingToType(np_selu, tf_selu) self.assertShapeEqual(np_selu, tf_selu) def testNumbers(self): for t in [np.float16, np.float32, np.float64]: self._testSelu( np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t)) # Force executed on CPU in case GPU kernels are available. 
with ops.device("/device:CPU:0"): self._testSelu( np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t)) def testGradientFloat32(self): with self.cached_session(): x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]] x = np.asarray(x_val, dtype=np.float32, order="F") err = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient( nn_ops.selu, [x], delta=1.0 / 1024)) self.assertLess(err, 1e-4) def testGradientFloat64(self): with self.cached_session(): x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]] x = np.asarray(x_val, dtype=np.float64, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(nn_ops.selu, [x])) self.assertLess(err, 1e-6) def testGradGradFloat32(self): with self.cached_session(): def f(x): assert x.dtype == dtypes.float32 with backprop.GradientTape() as tape: tape.watch(x) y = nn_ops.selu(x) return tape.gradient(y, x) x = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=np.float32, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(f, [x], delta=1.0 / 1024)) self.assertLess(err, 1e-4) def testGradGradFloat64(self): with self.cached_session(): def f(x): assert x.dtype == dtypes.float64 with backprop.GradientTape() as tape: tape.watch(x) y = nn_ops.selu(x) return tape.gradient(y, x) x = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=np.float64, order="F") err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient(f, [x])) self.assertLess(err, 1e-6) class CreluTest(test.TestCase): def testCreluShape(self): f = random_ops.random_normal([50, 5, 7, 10]) t = nn_ops.crelu(f) self.assertEqual([50, 5, 7, 20], t.get_shape()) def _testCrelu(self, np_features): np_relu = np.maximum(np_features, np.zeros_like(np_features)) np_neg_relu = np.maximum(-np_features, np.zeros_like(np_features)) np_crelu = np.concatenate((np_relu, np_neg_relu), len(np_features.shape) - 1) 
tf_crelu = nn_ops.crelu(np_features) self.assertAllClose(np_crelu, tf_crelu) self.assertShapeEqual(np_crelu, tf_crelu) def testNumbersCPU(self): for t in [np.int32, np.int64, np.float16, np.float32, np.float64]: # Force execution on CPU even if a GPU kernel is available for the type. with ops.device("/device:CPU:0"): self._testCrelu( np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t)) def testNumbersGPU(self): if not test.is_gpu_available(): self.skipTest("No GPU available") for t in [np.float16, np.float32, np.float64]: self._testCrelu( np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t)) def testNumbersWithAxis0(self): tf_crelu = nn_ops.crelu( np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]), axis=0) np_crelu = np.array([[0, 7, 0, 3, 0], [1, 0, 5, 0, 9], [9, 0, 5, 0, 1], [0, 3, 0, 7, 0]]) self.assertAllEqual(np_crelu, tf_crelu) def testNumbersWithAxis1(self): tf_crelu = nn_ops.crelu( np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]), axis=1) np_crelu = np.array([[0, 7, 0, 3, 0, 9, 0, 5, 0, 1], [1, 0, 5, 0, 9, 0, 3, 0, 7, 0]]) self.assertAllEqual(np_crelu, tf_crelu) if __name__ == "__main__": test.main()
apache-2.0
shelbycruver/real-python-test
env/lib/python2.7/site-packages/setuptools/tests/test_svn.py
300
7806
# -*- coding: utf-8 -*-
"""svn tests"""

import io
import os
import subprocess
import sys
import unittest

from setuptools.tests import environment
from setuptools.compat import unicode, unichr
from setuptools import svn_utils
from setuptools.tests.py26compat import skipIf


def _do_svn_check():
    """Return True if a working ``svn`` client is available on this system."""
    try:
        subprocess.check_call(["svn", "--version"],
                              shell=(sys.platform == 'win32'))
        return True
    except (OSError, subprocess.CalledProcessError):
        return False

# Probe once at import time; used by skipIf decorators below.
_svn_check = _do_svn_check()


class TestSvnVersion(unittest.TestCase):
    """Tests for SvnInfo.get_svn_version()."""

    def test_no_svn_found(self):
        # Locate the PATH variable case-insensitively (Windows may use 'Path').
        path_variable = None
        for env in os.environ:
            if env.lower() == 'path':
                path_variable = env

        if path_variable is None:
            try:
                self.skipTest('Cannot figure out how to modify path')
            except AttributeError:  # PY26 doesn't have this
                return

        # With an empty PATH no svn binary can be found, so the reported
        # version string must be empty.
        old_path = os.environ[path_variable]
        os.environ[path_variable] = ''
        try:
            version = svn_utils.SvnInfo.get_svn_version()
            self.assertEqual(version, '')
        finally:
            os.environ[path_variable] = old_path

    # Fixed typo in the skip reason: previously read "No SVN to text".
    @skipIf(not _svn_check, "No SVN to test, in the first place")
    def test_svn_should_exist(self):
        version = svn_utils.SvnInfo.get_svn_version()
        self.assertNotEqual(version, '')


def _read_utf8_file(path):
    """Read and return the whole contents of *path* decoded as UTF-8."""
    fileobj = None
    try:
        fileobj = io.open(path, 'r', encoding='utf-8')
        data = fileobj.read()
        return data
    finally:
        if fileobj:
            fileobj.close()


class ParserInfoXML(unittest.TestCase):
    """Parse pre-generated ``svn info --xml`` samples for each svn release."""

    def parse_tester(self, svn_name, ext_spaces):
        path = os.path.join('setuptools', 'tests',
                            'svn_data', svn_name + '_info.xml')
        # Remember these are pre-generated to test XML parsing,
        # so these paths might not be valid on your system.
        example_base = "%s_example" % svn_name

        data = _read_utf8_file(path)

        expected = set([
            ("\\".join((example_base, 'a file')), 'file'),
            ("\\".join((example_base, 'folder')), 'dir'),
            ("\\".join((example_base, 'folder', 'lalala.txt')), 'file'),
            ("\\".join((example_base, 'folder', 'quest.txt')), 'file'),
            ])
        self.assertEqual(set(x for x in svn_utils.parse_dir_entries(data)),
                         expected)

    def test_svn13(self):
        self.parse_tester('svn13', False)

    def test_svn14(self):
        self.parse_tester('svn14', False)

    def test_svn15(self):
        self.parse_tester('svn15', False)

    def test_svn16(self):
        self.parse_tester('svn16', True)

    def test_svn17(self):
        self.parse_tester('svn17', True)

    def test_svn18(self):
        self.parse_tester('svn18', True)


class ParserExternalXML(unittest.TestCase):
    """Parse pre-generated ``svn:externals`` XML listings per svn release."""

    def parse_tester(self, svn_name, ext_spaces):
        path = os.path.join('setuptools', 'tests',
                            'svn_data', svn_name + '_ext_list.xml')
        example_base = svn_name + '_example'
        data = _read_utf8_file(path)

        # svn >= 1.6 supports spaces in external folder names.
        if ext_spaces:
            folder2 = 'third party2'
            folder3 = 'third party3'
        else:
            folder2 = 'third_party2'
            folder3 = 'third_party3'

        expected = set([
            os.sep.join((example_base, folder2)),
            os.sep.join((example_base, folder3)),
            # folder is third_party大介
            os.sep.join((example_base,
                         unicode('third_party') +
                         unichr(0x5927) + unichr(0x4ecb))),
            os.sep.join((example_base, 'folder', folder2)),
            os.sep.join((example_base, 'folder', folder3)),
            os.sep.join((example_base, 'folder',
                         unicode('third_party') +
                         unichr(0x5927) + unichr(0x4ecb))),
            ])
        expected = set(os.path.normpath(x) for x in expected)
        dir_base = os.sep.join(('C:', 'development', 'svn_example'))
        self.assertEqual(set(x for x in
                             svn_utils.parse_externals_xml(data, dir_base)),
                         expected)

    def test_svn15(self):
        self.parse_tester('svn15', False)

    def test_svn16(self):
        self.parse_tester('svn16', True)

    def test_svn17(self):
        self.parse_tester('svn17', True)

    def test_svn18(self):
        self.parse_tester('svn18', True)


class ParseExternal(unittest.TestCase):
    """Parse pre-generated plain-text ``svn:externals`` property values."""

    def parse_tester(self, svn_name, ext_spaces):
        path = os.path.join('setuptools', 'tests',
                            'svn_data', svn_name + '_ext_list.txt')
        data = _read_utf8_file(path)

        if ext_spaces:
            expected = set(['third party2', 'third party3',
                            'third party3b', 'third_party'])
        else:
            expected = set(['third_party2', 'third_party3', 'third_party'])

        self.assertEqual(set(x for x in svn_utils.parse_external_prop(data)),
                         expected)

    def test_svn13(self):
        self.parse_tester('svn13', False)

    def test_svn14(self):
        self.parse_tester('svn14', False)

    def test_svn15(self):
        self.parse_tester('svn15', False)

    def test_svn16(self):
        self.parse_tester('svn16', True)

    def test_svn17(self):
        self.parse_tester('svn17', True)

    def test_svn18(self):
        self.parse_tester('svn18', True)


class TestSvn(environment.ZippedEnvironment):
    """Integration tests run inside an unzipped example working copy."""

    def setUp(self):
        version = svn_utils.SvnInfo.get_svn_version()
        if not version:  # empty or null
            self.dataname = None
            self.datafile = None
            return

        # Only the major.minor pair selects which example zip to use.
        self.base_version = tuple([int(x) for x in version.split('.')[:2]])

        if self.base_version < (1, 3):
            raise ValueError('Insufficient SVN Version %s' % version)
        elif self.base_version >= (1, 9):
            # Trying the latest version known to have example data.
            self.base_version = (1, 8)

        self.dataname = "svn%i%i_example" % self.base_version
        self.datafile = os.path.join('setuptools', 'tests',
                                     'svn_data', self.dataname + ".zip")
        super(TestSvn, self).setUp()

    # Fixed typo in the skip reason: previously read "No SVN to text".
    @skipIf(not _svn_check, "No SVN to test, in the first place")
    def test_revision(self):
        rev = svn_utils.SvnInfo.load('.').get_revision()
        self.assertEqual(rev, 6)

    @skipIf(not _svn_check, "No SVN to test, in the first place")
    def test_entries(self):
        expected = set([
            (os.path.join('a file'), 'file'),
            (os.path.join('folder'), 'dir'),
            (os.path.join('folder', 'lalala.txt'), 'file'),
            (os.path.join('folder', 'quest.txt'), 'file'),
            # The example will have a deleted file (or should)
            # but shouldn't return it
            ])
        info = svn_utils.SvnInfo.load('.')
        self.assertEqual(set(x for x in info.entries), expected)

    @skipIf(not _svn_check, "No SVN to test, in the first place")
    def test_externals(self):
        # svn >= 1.6 example data uses spaces in external folder names.
        if self.base_version >= (1, 6):
            folder2 = 'third party2'
            folder3 = 'third party3'
        else:
            folder2 = 'third_party2'
            folder3 = 'third_party3'

        expected = set([
            os.path.join(folder2),
            os.path.join(folder3),
            os.path.join('third_party'),
            os.path.join('folder', folder2),
            os.path.join('folder', folder3),
            os.path.join('folder', 'third_party'),
            ])
        info = svn_utils.SvnInfo.load('.')
        self.assertEqual(set([x for x in info.externals]), expected)


def test_suite():
    """Return the suite of all tests defined in this module."""
    return unittest.defaultTestLoader.loadTestsFromName(__name__)
gpl-2.0
bd808/apache-errors
apache-errors.py
1
8784
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2013 Bryan Davis and contributors """ Apache error log report generator """ import collections import datetime import hashlib import re import string import textwrap ERROR_FORMAT = ( r'\[(?P<datetime>[^\]]+)\] ' r'\[(?P<level>[^\]]+)\] ' r'(\[client (?P<ip>[^\]]+)\] )?' r'(?P<message>.*)' ) RE_ERROR_FORMAT = re.compile(ERROR_FORMAT) IGNORE_ITEM = '~~bd808.ignore.item~~' def parse_error_log (lines, logpat=RE_ERROR_FORMAT): """ Parse a log file into a sequence of dictionaries Args: lines: line generator logpat: regex to split lines Returns: generator of mapped lines """ groups = (logpat.match(line) for line in lines) tuples = (g.groupdict() for g in groups if g) log = field_map(tuples, 'message', expand_newlines) log = field_map(log, 'datetime', format_date) return log #end parse_error_log def field_map (dictseq, name, func): """ Process a sequence of dictionaries and remap one of the fields Typically used in a generator chain to coerce the datatype of a particular field. eg ``log = field_map(log, 'status', int)`` Args: dictseq: Sequence of dictionaries name: Field to modify func: Modification to apply """ for d in dictseq: if name in d: d[name] = func(d[name]) yield d #end field_map def format_date (d): """ Convert apache dates formatted as "Thu Mar 03 16:13:22 2011" to standard iso date format. Args: d: date string to reformat Returns: ISO 8601 formatted date (YYYY-MM-DDTHH:MM:SS) """ return datetime.datetime.strptime(d, '%a %b %d %H:%M:%S %Y').isoformat() #end format_date def expand_newlines (src): """ Replace occurances of the chars "\n" with an actual newline. Args: src: input string Returns: expanded string """ return string.replace(src, r'\n', '\n') #end expand_newlines def reduce_pattern_matches (x, y): """ Combine pattern map output by combining groupdict() results into a single dict. If either argument is None then the result is None because all patterns must match for the expectation to match. 
Args: x: re.MatchObject, dict or None y: re.MatchObject or None Return: dict with all matched catptures or None """ if x is None or y is None: return None captures = x if isinstance(x, dict) else x.groupdict() captures.update(y.groupdict()) return captures #end reduce_pattern_matches def print_report (log, expect): """ Print a report to stdout based on the given log generator and expected message configuration. If the regex for a given label includes named pattern captures those named captures can be used alter the label for a particular match. For example: >>> e = { 'found %(val)s': re.compile(r'something (?P<val>\d+)'), } The special label defined in the IGNORE_ITEM constant can be used to silently discard lines that are not desired to be reported as an occurance count or an unexpected entry. The generated report will have a block of label: count pairs at the top followed by pretty printed versions of any log entries that were found but not expected. Args: log: log generator expect: list of (label, dict) patterns """ found = collections.defaultdict(int) extra = [] dup = collections.defaultdict(int) output = False processed = collections.defaultdict(int) for r in log: unexpected = True processed[r['level']] += 1 for exp in expect: try: m = reduce(reduce_pattern_matches, map(lambda p: p[1].match(r[p[0]]), exp['match'].items())) except KeyError: m = None if m is not None: # grab named matches from pattern match replace_keys = m if isinstance(m, dict) else m.groupdict() # merge in raw log data so keys can use it replace_keys.update(r) # increment counter named by applying found tokens to format try: found[exp['format'] % replace_keys] += 1 except (KeyError, ValueError): #print "DEBUG: failed to expand template '%s' with %s" % ( # exp['format'], replace_keys) found[exp['format']] += 1 output = True # found something to report unexpected = False break; #end for if unexpected and r['level'] not in ['debug', 'notice']: # ignore debug and notice messages, too noisy key = 
hashlib.sha1(r['message']).hexdigest() if key not in dup: extra.append((r, key)) dup[key] += 1 #end for print "%-50s : %7s" % ("error", "count") print "=" * 60 prior_prefix = None def colonFirst (x, y): xe = x.split()[0][-1] ye = y.split()[0][-1] if xe == ye: return cmp(x, y) elif xe == ':': return -1 elif ye == ':': return 1 else: return cmp(x, y) for item in sorted(found.keys(), cmp=colonFirst): if item == IGNORE_ITEM: processed['IGNORED'] = found[item] continue prefix = item.split()[0] if prior_prefix is not None: if ((prior_prefix[-1] == ':' and prefix <> prior_prefix) or (prefix[-1] == ':' and prefix <> prior_prefix) or (prior_prefix[-1] == ':' and prefix[-1] <> ':') ): print print "%-50s : %7d" % (item, found[item]) prior_prefix = prefix print # sort remaining messages by date def date_sort (a, b): return cmp(a[0]['datetime'], b[0]['datetime']); extra.sort(date_sort) if (len(extra) > 0): output = True # found something to report print "Unclassified messages" print "=" * 60 wrapit = textwrap.TextWrapper(subsequent_indent=' ') for t in extra: (r, key) = t dups = dup[key] fmt = "" fargs = [] if dups > 1: fmt += "<{}> " fargs.append(dups) fmt += "[{}] [{}] {}" fargs.extend([r['datetime'], r['level'], r['message']]) maxlines = 10 for line in fmt.format(*fargs).split("\n"): maxlines = maxlines - 1 if maxlines < 0: print " ...truncated..." break if len(line) > wrapit.width: print "\n ".join(wrapit.wrap(line)) else: print " ", line print if not output: print "no data to report" # end if print "\n\n--- processed", for key in sorted(processed.keys()): print "%s=%d" % (key, processed[key]), print #end print_report def compile_expects (elist): """ Compile a list of expectations. Expectations are dictionaries. The key 'format' is expected to exist in the dict and provide a format string for summarizing the log entry. The key 'match' provides a dict of log record field names and regex patterns to match on those fields. 
Args: expect: list of expectation dicts Returns: list of expectation dicts with values replaced by compiled regexs """ ret = [] for rec in elist: # compile regular expressions for key, value in rec['match'].iteritems(): rec['match'][key] = re.compile(value.replace('\\\\', '\\'), re.DOTALL) ret.append(rec) return ret #end compile_expects if __name__ == '__main__': """simple command line tool to extract named fields from a log on stdin.""" import optparse import os import sys parser = optparse.OptionParser(usage="usage: %prog [options] < example.log") parser.add_option("-y", "--yaml", help="YAML file of expected messages. Multiple uses allowed", action='append', metavar="FILE") (options, args) = parser.parse_args() expect = [] if options.yaml: import yaml expect = [] for abs_path in options.yaml: f = open(abs_path, 'r') expect += yaml.load(f) f.close() #end for expect = compile_expects(expect) print_report(parse_error_log(sys.stdin), expect) # vim:sw=4 ts=4 sts=4 et:
mit
vadimtk/chrome4sdp
build/android/pylib/local/local_test_server_spawner.py
58
1245
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from pylib import chrome_test_server_spawner
from pylib import forwarder
from pylib.base import test_server


class LocalTestServerSpawner(test_server.TestServer):
  """Drives a local chrome test server spawner for on-device tests.

  Wraps a SpawningServer instance and wires up the device-side state it
  needs: the ports file on external storage and a host<->device port
  forwarding mapping.
  """

  def __init__(self, port, device, tool):
    super(LocalTestServerSpawner, self).__init__()
    self._tool = tool
    self._device = device
    self._spawning_server = chrome_test_server_spawner.SpawningServer(
        port, device, tool)

  @property
  def server_address(self):
    """(host, port) pair the underlying spawning server is listening on."""
    return self._spawning_server.server.server_address

  @property
  def port(self):
    """Port number of the spawning server."""
    address = self.server_address
    return address[1]

  #override
  def SetUp(self):
    """Publishes the server port to the device, maps it, starts the server."""
    # The test APK reads this file to learn which spawner port to talk to.
    storage_path = self._device.GetExternalStoragePath()
    self._device.WriteFile(
        '%s/net-test-server-ports' % storage_path,
        '%s:0' % str(self.port))
    # Forward the same port number on the device to the host-side server.
    forwarder.Forwarder.Map(
        [(self.port, self.port)], self._device, self._tool)
    self._spawning_server.Start()

  #override
  def Reset(self):
    """Clears any state the spawning server accumulated during a test."""
    self._spawning_server.CleanupState()

  #override
  def TearDown(self):
    """Resets state, stops the server, then removes the port forwarding."""
    self.Reset()
    self._spawning_server.Stop()
    forwarder.Forwarder.UnmapDevicePort(self.port, self._device)
bsd-3-clause
gina-alaska/emodis_ndvi_python-docker
emodis_ndvi_python/pycodes/computemetrics_by1yr.py
2
4565
# Calculate the seasonal NDVI metrics for a single year of data.
import numpy as np
from getforwardma import *
from getbackwardma import *
from getcrossover_percentage_extremeslope import *
from getsos import *
from geteos import *
from getmaxndvi import *
from gettotndvi import *
from getndvitodate import *
from getrange import *
from getslope import *


def computemetrics_by1yr(NDVI, ndvi_raw, bq, bn, wl, bpy, CurrentBand, DaysPerBand):
    """Compute phenology metrics from one year of NDVI time-series data.

    Args:
        NDVI: smoothed NDVI vector (1-D).
        ndvi_raw: raw (interpolated) NDVI vector.
        bq: quality vector.
        bn: band name vector for NDVI (currently unused by this function).
        wl: two-element list [wlb, wlh] of moving-window lengths.
        bpy: number of points in one year.
        CurrentBand: time interval between two points in NDVI (=7).
        DaysPerBand: NDVI 7-day data, DaysPerBand=7.
    Returns:
        dict of metrics (start/end of season, slopes, totals, maxima, etc.).

    jzhu, 9/21/2011: modified from computemetrics.pro; inputs a one-year
    NDVI time series plus related band-name vectors, wl, bpy, CurrentBand
    and DaysPerBand. NDVI is smoothed data, ndvi_raw is interpolated data.
    """
    nSize = NDVI.shape
    # The time axis of a 1-D NDVI vector is simply the sample index.
    # BUG FIX: this value was previously bound to ``Time1`` while all the
    # calls below referenced ``Time``, which raised a NameError at runtime
    # (unless a ``Time`` leaked in through one of the star imports above —
    # TODO confirm against the helper modules).
    Time = np.array(range(nSize[0]), dtype=np.int16)

    # Calculate forward/backward moving averages of the smoothed series.
    FMA = GetForwardMA(NDVI, wl[0])
    BMA = GetBackwardMA(NDVI, wl[1])

    # Get crossover points (potential starts/ends of the growing season).
    # mv_percent=0.2: user-chosen threshold, 20% of the maximum NDVI value.
    mv_percent = 0.2
    Starts = GetCrossOver_percentage_extremeslope(
        Time, NDVI, Time, FMA, mv_percent, bq, bpy, 'DOWN')
    Ends = GetCrossOver_percentage_extremeslope(
        Time, NDVI, Time, BMA, mv_percent, bq, bpy, 'UP')

    # Determine start/end of season.
    # GetSOS: find the possible SOS among the crossovers closest to the 20%
    # up-threshold point; guarantee possible SOS > threshold and that the
    # chosen point is a good-quality point.
    SOS = GetSOS(Starts, NDVI, bq, Time, bpy, FMA)
    # GetEOS: find the last 20% point, pick the candidate nearest to it,
    # take the smaller of candidate and 20% point, and guarantee the chosen
    # point is a good-quality point.
    EOS = GetEOS(Ends, NDVI, bq, Time, bpy, BMA)

    # Bundle start/end of season with the two moving averages.
    Start_End = {'SOST': SOS['SOST'],
                 'SOSN': SOS['SOSN'],
                 'EOST': EOS['EOST'],
                 'EOSN': EOS['EOSN'],
                 'FwdMA': FMA,
                 'BkwdMA': BMA,
                 }

    SOST = Start_End['SOST'][0]
    SOSN = Start_End['SOSN'][0]
    EOST = Start_End['EOST'][0]
    EOSN = Start_End['EOSN'][0]

    # Day index and related maximum NDVI value.
    MaxND = GetMaxNDVI(ndvi_raw, Time, Start_End, bpy)

    # NDVI*day (the NDVI curve minus baseline), plus the baseline,
    # NDVI and time vectors from start to end of season
    # (GrowingSeasonT=GST, GrowingSeasonN=GSN, GrowingSeasonB=GSB).
    TotalNDVI = GetTotNDVI(NDVI, Time, Start_End, bpy, DaysPerBand)

    # Integration of NDVI between start of season and CurrentBand index:
    # NDVI*day, NowT (day index), NowN.
    NDVItoDate = GetNDVItoDate(NDVI, Time, Start_End, bpy, DaysPerBand,
                               CurrentBand)

    Slope = GetSlope(Start_End, MaxND, bpy, DaysPerBand)  # slope = ndvi/day
    # Range.RangeT = day, Range.RangeN = ndvi.
    Range = GetRange(Start_End, MaxND, bpy, DaysPerBand)

    mMetrics = {'SOST': SOST,
                'SOSN': SOSN,
                'EOST': EOST,
                'EOSN': EOSN,
                'FwdMA': Start_End['FwdMA'],
                'BkwdMA': Start_End['BkwdMA'],
                'SlopeUp': Slope['SlopeUp'],
                'SlopeDown': Slope['SlopeDown'],
                'TotalNDVI': TotalNDVI['TotalNDVI'],
                'GrowingN': TotalNDVI['GSN'],
                'GrowingT': TotalNDVI['GST'],
                'GrowingB': TotalNDVI['GSB'],
                'MaxT': MaxND['MaxT'],
                'MaxN': MaxND['MaxN'],
                'RangeT': Range['RangeT'],
                'RangeN': Range['RangeN'],
                'NDVItoDate': NDVItoDate['NDVItoDate'],
                'NowT': NDVItoDate['NowT'],
                'NowN': NDVItoDate['NowN'],
                }
    return mMetrics
mit
undoware/neutron-drive
google_appengine/google/appengine/_internal/django/template/context.py
23
5431
from google.appengine._internal.django.core.exceptions import ImproperlyConfigured
from google.appengine._internal.django.utils.importlib import import_module

# Cache of actual callables.
_standard_context_processors = None
# We need the CSRF processor no matter what the user has in their settings,
# because otherwise it is a security vulnerability, and we can't afford to leave
# this to human error or failure to read migration instructions.
_builtin_context_processors = ('google.appengine._internal.django.core.context_processors.csrf',)


class ContextPopException(Exception):
    "pop() has been called more times than push()"
    pass


class BaseContext(object):
    """Base for template contexts: a stack of dicts searched newest-first.

    ``self.dicts[0]`` is the bottom of the stack; pushed scopes are appended
    at the end, so ``self.dicts[-1]`` is the current (innermost) scope.
    """

    def __init__(self, dict_=None):
        # Seed the stack with the supplied mapping, or an empty dict.
        dict_ = dict_ or {}
        self.dicts = [dict_]

    def __repr__(self):
        return repr(self.dicts)

    def __iter__(self):
        # Yield the scope dicts from innermost to outermost.
        for d in reversed(self.dicts):
            yield d

    def push(self):
        """Open a new (empty) innermost scope and return it."""
        d = {}
        self.dicts.append(d)
        return d

    def pop(self):
        """Discard and return the innermost scope; never pop the base dict."""
        if len(self.dicts) == 1:
            raise ContextPopException
        return self.dicts.pop()

    def __setitem__(self, key, value):
        "Set a variable in the current context"
        self.dicts[-1][key] = value

    def __getitem__(self, key):
        "Get a variable's value, starting at the current context and going upward"
        for d in reversed(self.dicts):
            if key in d:
                return d[key]
        raise KeyError(key)

    def __delitem__(self, key):
        "Delete a variable from the current context"
        del self.dicts[-1][key]

    def has_key(self, key):
        # NOTE: iterates the stack bottom-up, unlike get()/__getitem__();
        # order is irrelevant for a pure membership test.
        for d in self.dicts:
            if key in d:
                return True
        return False

    def __contains__(self, key):
        return self.has_key(key)

    def get(self, key, otherwise=None):
        """Like dict.get(): innermost match wins, else return *otherwise*."""
        for d in reversed(self.dicts):
            if key in d:
                return d[key]
        return otherwise


class Context(BaseContext):
    "A stack container for variable context"

    def __init__(self, dict_=None, autoescape=True, current_app=None):
        # autoescape: whether rendered variables are HTML-escaped by default.
        self.autoescape = autoescape
        self.current_app = current_app
        # Per-render scratch space for template nodes (see RenderContext).
        self.render_context = RenderContext()
        super(Context, self).__init__(dict_)

    def update(self, other_dict):
        "Like dict.update(). Pushes an entire dictionary's keys and values onto the context."
        if not hasattr(other_dict, '__getitem__'):
            raise TypeError('other_dict must be a mapping (dictionary-like) object.')
        # The mapping itself is pushed as a new scope (no copying).
        self.dicts.append(other_dict)
        return other_dict


class RenderContext(BaseContext):
    """
    A stack container for storing Template state.

    RenderContext simplifies the implementation of template Nodes by providing a
    safe place to store state between invocations of a node's `render` method.

    The RenderContext also provides scoping rules that are more sensible for
    'template local' variables. The render context stack is pushed before each
    template is rendered, creating a fresh scope with nothing in it. Name
    resolution fails if a variable is not found at the top of the
    RequestContext stack. Thus, variables are local to a specific template and
    don't affect the rendering of other templates as they would if they were
    stored in the normal template context.
    """

    # Unlike BaseContext, every operation below looks only at the innermost
    # scope (self.dicts[-1]) — names never leak across template boundaries.

    def __iter__(self):
        for d in self.dicts[-1]:
            yield d

    def has_key(self, key):
        return key in self.dicts[-1]

    def get(self, key, otherwise=None):
        d = self.dicts[-1]
        if key in d:
            return d[key]
        return otherwise

# This is a function rather than module-level procedural code because we only
# want it to execute if somebody uses RequestContext.
def get_standard_processors(): from google.appengine._internal.django.conf import settings global _standard_context_processors if _standard_context_processors is None: processors = [] collect = [] collect.extend(_builtin_context_processors) collect.extend(settings.TEMPLATE_CONTEXT_PROCESSORS) for path in collect: i = path.rfind('.') module, attr = path[:i], path[i+1:] try: mod = import_module(module) except ImportError, e: raise ImproperlyConfigured('Error importing request processor module %s: "%s"' % (module, e)) try: func = getattr(mod, attr) except AttributeError: raise ImproperlyConfigured('Module "%s" does not define a "%s" callable request processor' % (module, attr)) processors.append(func) _standard_context_processors = tuple(processors) return _standard_context_processors class RequestContext(Context): """ This subclass of template.Context automatically populates itself using the processors defined in TEMPLATE_CONTEXT_PROCESSORS. Additional processors can be specified as a list of callables using the "processors" keyword argument. """ def __init__(self, request, dict=None, processors=None, current_app=None): Context.__init__(self, dict, current_app=current_app) if processors is None: processors = () else: processors = tuple(processors) for processor in get_standard_processors() + processors: self.update(processor(request))
bsd-3-clause
Aimage/shinken
libexec/notify_by_xmpp.py
17
2686
#!/usr/bin/env python # skvidal@fedoraproject.org, modified by David Laval # gplv2+ ## XMPP notification #define command{ # command_name notify-host-by-xmpp # command_line $PLUGINSDIR$/notify_by_xmpp.py -a $PLUGINSDIR$/notify_by_xmpp.ini "Host '$HOSTALIAS$' is $HOSTSTATE$ - Info : $HOSTOUTPUT$" $CONTACTEMAIL$ #} # #define command{ # command_name notify-service-by-xmpp # command_line $PLUGINSDIR$/notify_by_xmpp.py -a $PLUGINSDIR$/notify_by_xmpp.ini "$NOTIFICATIONTYPE$ $HOSTNAME$ $SERVICED ESC$ $SERVICESTATE$ $SERVICEOUTPUT$ $LONGDATETIME$" $CONTACTEMAIL$ #} # needs a config file to get username/pass/other info format is: #[xmpp_account] #server=jabber.org #port=5222 #username=yourusername #password=yourpasssword #resource=monitoring defaults = {'server':'jabber.org', 'port':'5222', 'resource':'monitoring'} # until xmppony is inplace import warnings warnings.simplefilter("ignore") import xmpp from xmpp.protocol import Message from optparse import OptionParser import ConfigParser import sys import os parser = OptionParser() parser.add_option("-a", dest="authfile", default=None, help="file to retrieve username/password/server/port/resource information from") opts, args = parser.parse_args() conf = ConfigParser.ConfigParser(defaults=defaults) if not opts.authfile or not os.path.exists(opts.authfile): print "no config/auth file specified, can't continue" sys.exit(1) conf.read(opts.authfile) if not conf.has_section('xmpp_account') or not conf.has_option('xmpp_account', 'username') or not conf.has_option('xmpp_account', 'password'): print "cannot find at least one of: config section 'xmpp_account' or username or password" sys.exit(1) server = conf.get('xmpp_account', 'server') username = conf.get('xmpp_account', 'username') password = conf.get('xmpp_account', 'password') resource = conf.get('xmpp_account', 'resource') port = conf.get('xmpp_account', 'port') if len(args) < 1: print "xmppsend message [to whom, multiple args]" sys.exit(1) msg = args[0] msg = 
msg.replace('\\n', '\n') c = xmpp.Client(server=server, port=port, debug=[]) con = c.connect() if not con: print "Error: could not connect to server: %s:%s" % (c.Server, c.Port) sys.exit(1) auth = c.auth(user=username, password=password, resource=resource) if not auth: print "Error: Could not authenticate to server: %s:%s" % (c.Server, c.Port) sys.exit(1) if len(args) < 2: r = c.getRoster() for user in r.keys(): if user == username: continue c.send(Message(user, '%s' % msg)) else: for user in args[1:]: c.send(Message(user, '%s' % msg))
agpl-3.0
EDUlib/edx-platform
lms/djangoapps/verify_student/migrations/0006_ssoverification.py
4
2473
# Generated by Django 1.11.12 on 2018-04-11 15:20 import django.db.models.deletion import django.utils.timezone import model_utils.fields from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('verify_student', '0005_remove_deprecated_models'), ] operations = [ migrations.CreateModel( name='SSOVerification', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('status', model_utils.fields.StatusField(choices=[('created', 'created'), ('ready', 'ready'), ('submitted', 'submitted'), ('must_retry', 'must_retry'), ('approved', 'approved'), ('denied', 'denied')], default='created', max_length=100, no_check_for_status=True, verbose_name='status')), ('status_changed', model_utils.fields.MonitorField(default=django.utils.timezone.now, monitor='status', verbose_name='status changed')), ('name', models.CharField(blank=True, max_length=255)), ('created_at', models.DateTimeField(auto_now_add=True, db_index=True)), ('updated_at', models.DateTimeField(auto_now=True, db_index=True)), ( 'identity_provider_type', models.CharField( choices=[ ('common.djangoapps.third_party_auth.models.OAuth2ProviderConfig', 'OAuth2 Provider'), ('common.djangoapps.third_party_auth.models.SAMLProviderConfig', 'SAML Provider'), ('common.djangoapps.third_party_auth.models.LTIProviderConfig', 'LTI Provider'), ], default='common.djangoapps.third_party_auth.models.SAMLProviderConfig', help_text='Specifies which type of Identity Provider this verification originated from.', max_length=100, ), ), ('identity_provider_slug', models.SlugField(default='default', help_text='The slug uniquely identifying the Identity Provider this verification originated from.', max_length=30)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]
agpl-3.0
noahc3/PokemonGoTeamManager
libs/py/pogoapi/POGOProtos/Networking/Requests/Messages/EvolvePokemonMessage_pb2.py
16
2353
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: POGOProtos/Networking/Requests/Messages/EvolvePokemonMessage.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='POGOProtos/Networking/Requests/Messages/EvolvePokemonMessage.proto', package='POGOProtos.Networking.Requests.Messages', syntax='proto3', serialized_pb=_b('\nBPOGOProtos/Networking/Requests/Messages/EvolvePokemonMessage.proto\x12\'POGOProtos.Networking.Requests.Messages\"*\n\x14\x45volvePokemonMessage\x12\x12\n\npokemon_id\x18\x01 \x01(\x06\x62\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _EVOLVEPOKEMONMESSAGE = _descriptor.Descriptor( name='EvolvePokemonMessage', full_name='POGOProtos.Networking.Requests.Messages.EvolvePokemonMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='pokemon_id', full_name='POGOProtos.Networking.Requests.Messages.EvolvePokemonMessage.pokemon_id', index=0, number=1, type=6, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=111, serialized_end=153, ) DESCRIPTOR.message_types_by_name['EvolvePokemonMessage'] = _EVOLVEPOKEMONMESSAGE EvolvePokemonMessage = _reflection.GeneratedProtocolMessageType('EvolvePokemonMessage', (_message.Message,), dict( DESCRIPTOR = _EVOLVEPOKEMONMESSAGE, __module__ = 
'POGOProtos.Networking.Requests.Messages.EvolvePokemonMessage_pb2' # @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.EvolvePokemonMessage) )) _sym_db.RegisterMessage(EvolvePokemonMessage) # @@protoc_insertion_point(module_scope)
mit
MoritzS/django
django/contrib/admin/__init__.py
562
1243
# ACTION_CHECKBOX_NAME is unused, but should stay since its import from here # has been referenced in documentation. from django.contrib.admin.decorators import register from django.contrib.admin.filters import ( AllValuesFieldListFilter, BooleanFieldListFilter, ChoicesFieldListFilter, DateFieldListFilter, FieldListFilter, ListFilter, RelatedFieldListFilter, RelatedOnlyFieldListFilter, SimpleListFilter, ) from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME from django.contrib.admin.options import ( HORIZONTAL, VERTICAL, ModelAdmin, StackedInline, TabularInline, ) from django.contrib.admin.sites import AdminSite, site from django.utils.module_loading import autodiscover_modules __all__ = [ "register", "ACTION_CHECKBOX_NAME", "ModelAdmin", "HORIZONTAL", "VERTICAL", "StackedInline", "TabularInline", "AdminSite", "site", "ListFilter", "SimpleListFilter", "FieldListFilter", "BooleanFieldListFilter", "RelatedFieldListFilter", "ChoicesFieldListFilter", "DateFieldListFilter", "AllValuesFieldListFilter", "RelatedOnlyFieldListFilter", "autodiscover", ] def autodiscover(): autodiscover_modules('admin', register_to=site) default_app_config = 'django.contrib.admin.apps.AdminConfig'
bsd-3-clause
samatdav/zulip
tools/lib/test_server.py
3
3335
from __future__ import print_function import os import subprocess import sys import time from contextlib import contextmanager if False: from typing import (Any, Iterator) try: import django import requests except ImportError as e: print("ImportError: {}".format(e)) print("You need to run the Zulip tests inside a Zulip dev environment.") print("If you are using Vagrant, you can `vagrant ssh` to enter the Vagrant guest.") sys.exit(1) TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if TOOLS_DIR not in sys.path: sys.path.insert(0, os.path.dirname(TOOLS_DIR)) from zerver.lib.test_fixtures import is_template_database_current def set_up_django(external_host): # type: (str) -> None os.environ['EXTERNAL_HOST'] = external_host os.environ["TORNADO_SERVER"] = "http://127.0.0.1:9983" os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.test_settings' django.setup() os.environ['PYTHONUNBUFFERED'] = 'y' def assert_server_running(server, log_file): # type: (subprocess.Popen, str) -> None """Get the exit code of the server, or None if it is still running.""" if server.poll() is not None: message = 'Server died unexpectedly!' if log_file: message += '\nSee %s\n' % (log_file,) raise RuntimeError(message) def server_is_up(server, log_file): # type: (subprocess.Popen, str) -> bool assert_server_running(server, log_file) try: # We could get a 501 error if the reverse proxy is up but the Django app isn't. return requests.get('http://127.0.0.1:9981/accounts/home').status_code == 200 except: return False @contextmanager def test_server_running(force=False, external_host='testserver', log_file=None, dots=False, use_db=True): # type: (bool, str, str, bool, bool) -> Iterator[None] if log_file: if os.path.exists(log_file) and os.path.getsize(log_file) < 100000: log = open(log_file, 'a') log.write('\n\n') else: log = open(log_file, 'w') else: log = sys.stdout # type: ignore # BinaryIO vs. 
IO[str] set_up_django(external_host) if use_db: generate_fixtures_command = ['tools/setup/generate-fixtures'] if not is_template_database_current(): generate_fixtures_command.append('--force') subprocess.check_call(generate_fixtures_command) # Run this not through the shell, so that we have the actual PID. run_dev_server_command = ['tools/run-dev.py', '--test'] if force: run_dev_server_command.append('--force') server = subprocess.Popen(run_dev_server_command, stdout=log, stderr=log) try: # Wait for the server to start up. sys.stdout.write('Waiting for test server') while not server_is_up(server, log_file): if dots: sys.stdout.write('.') sys.stdout.flush() time.sleep(0.1) sys.stdout.write('\n') # DO OUR ACTUAL TESTING HERE!!! yield finally: assert_server_running(server, log_file) server.terminate() if __name__ == '__main__': # The code below is for testing this module works with test_server_running(): print('\n\n SERVER IS UP!\n\n')
apache-2.0
BaesFr/Sick-Beard
lib/hachoir_metadata/qt/dialog_ui.py
94
2439
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'hachoir_metadata/qt/dialog.ui' # # Created: Mon Jul 26 03:10:06 2010 # by: PyQt4 UI code generator 4.7.3 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui class Ui_Form(object): def setupUi(self, Form): Form.setObjectName("Form") Form.resize(441, 412) self.verticalLayout = QtGui.QVBoxLayout(Form) self.verticalLayout.setObjectName("verticalLayout") self.horizontalLayout_2 = QtGui.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.open_button = QtGui.QPushButton(Form) self.open_button.setObjectName("open_button") self.horizontalLayout_2.addWidget(self.open_button) self.files_combo = QtGui.QComboBox(Form) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.files_combo.sizePolicy().hasHeightForWidth()) self.files_combo.setSizePolicy(sizePolicy) self.files_combo.setObjectName("files_combo") self.horizontalLayout_2.addWidget(self.files_combo) self.verticalLayout.addLayout(self.horizontalLayout_2) self.metadata_table = QtGui.QTableWidget(Form) self.metadata_table.setAlternatingRowColors(True) self.metadata_table.setShowGrid(False) self.metadata_table.setRowCount(0) self.metadata_table.setColumnCount(0) self.metadata_table.setObjectName("metadata_table") self.metadata_table.setColumnCount(0) self.metadata_table.setRowCount(0) self.verticalLayout.addWidget(self.metadata_table) self.quit_button = QtGui.QPushButton(Form) self.quit_button.setObjectName("quit_button") self.verticalLayout.addWidget(self.quit_button) self.retranslateUi(Form) QtCore.QMetaObject.connectSlotsByName(Form) def retranslateUi(self, Form): Form.setWindowTitle(QtGui.QApplication.translate("Form", "hachoir-metadata", None, QtGui.QApplication.UnicodeUTF8)) self.open_button.setText(QtGui.QApplication.translate("Form", "Open", None, 
QtGui.QApplication.UnicodeUTF8)) self.quit_button.setText(QtGui.QApplication.translate("Form", "Quit", None, QtGui.QApplication.UnicodeUTF8))
gpl-3.0