id
int64
0
300k
label
stringlengths
1
74
text
stringlengths
4k
8k
5,600
preprocess
# Copyright (c) Alibaba, Inc. and its affiliates.
import math
import os.path as osp
from typing import Any, Dict

import cv2
import numpy as np
import PIL
import torch

from modelscope.metainfo import Pipelines
from modelscope.outputs import OutputKeys
from modelscope.pipelines.base import Input, Pipeline
from modelscope.pipelines.builder import PIPELINES
from modelscope.pipelines.cv.ocr_utils.model_resnet18_half import \
    LicensePlateDet
from modelscope.pipelines.cv.ocr_utils.table_process import (
    bbox_decode, bbox_post_process, decode_by_ind, get_affine_transform, nms)
from modelscope.preprocessors import load_image
from modelscope.preprocessors.image import LoadImage
from modelscope.utils.config import Config
from modelscope.utils.constant import ModelFile, Tasks
from modelscope.utils.logger import get_logger

logger = get_logger()


@PIPELINES.register_module(
    Tasks.license_plate_detection,
    module_name=Pipelines.license_plate_detection)
class LicensePlateDetection(Pipeline):
    """Pipeline that detects license plates in an image.

    Produces quadrilateral polygons plus a plate-type label per detection,
    using the ResNet18-half ``LicensePlateDet`` network.
    """

    def __init__(self, model: str, **kwargs):
        """
        Args:
            model: model id on modelscope hub.
        """
        super().__init__(model=model, **kwargs)
        model_path = osp.join(self.model, ModelFile.TORCH_MODEL_FILE)
        config_path = osp.join(self.model, ModelFile.CONFIGURATION)
        logger.info(f'loading model from {model_path}')
        self.cfg = Config.from_file(config_path)
        self.K = self.cfg.K  # max number of detections kept per image
        self.car_type = self.cfg.Type  # index -> plate-type label table
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.infer_model = LicensePlateDet()
        checkpoint = torch.load(model_path, map_location=self.device)
        # Checkpoints either wrap the weights under 'state_dict' or are a
        # bare state dict; support both layouts.
        if 'state_dict' in checkpoint:
            self.infer_model.load_state_dict(checkpoint['state_dict'])
        else:
            self.infer_model.load_state_dict(checkpoint)
        # Move to the target device and switch to eval mode in one step
        # (the original moved the model to the device twice).
        self.infer_model = self.infer_model.to(self.device).eval()

    def METHOD_NAME(self, input: Input) -> Dict[str, Any]:
        """Load, normalize and warp the input image to the 512x512 network
        input size.

        Returns a dict with the input tensor ('img') and the affine metadata
        ('meta') needed by ``postprocess`` to map detections back to the
        original image coordinates.
        """
        # convert_to_ndarray yields RGB; the detector expects BGR order.
        img = LoadImage.convert_to_ndarray(input)[:, :, ::-1]
        mean = np.array([0.408, 0.447, 0.470],
                        dtype=np.float32).reshape(1, 1, 3)
        std = np.array([0.289, 0.274, 0.278],
                       dtype=np.float32).reshape(1, 1, 3)
        height, width = img.shape[0:2]
        inp_height, inp_width = 512, 512
        # Center/scale describe the square crop used by the affine warp.
        c = np.array([width / 2., height / 2.], dtype=np.float32)
        s = max(height, width) * 1.0
        trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
        # NOTE(review): resizing to (width, height) is a no-op on most
        # inputs; kept for behavioral parity with the released model.
        resized_image = cv2.resize(img, (width, height))
        inp_image = cv2.warpAffine(
            resized_image,
            trans_input, (inp_width, inp_height),
            flags=cv2.INTER_LINEAR)
        inp_image = ((inp_image / 255. - mean) / std).astype(np.float32)
        images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height,
                                                      inp_width)
        images = torch.from_numpy(images).to(self.device)
        meta = {
            'c': c,
            's': s,
            'input_height': inp_height,
            'input_width': inp_width,
            # The network predicts on a 4x-downsampled feature map.
            'out_height': inp_height // 4,
            'out_width': inp_width // 4
        }
        return {'img': images, 'meta': meta}

    def forward(self, input: Dict[str, Any]) -> Dict[str, Any]:
        """Run the detector on the preprocessed tensor."""
        pred = self.infer_model(input['img'])
        return {'results': pred, 'meta': input['meta']}

    def postprocess(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Decode network heatmaps into plate polygons and type labels."""
        output = inputs['results'][0]
        meta = inputs['meta']
        hm = output['hm'].sigmoid_()
        ftype = output['ftype'].sigmoid_()
        wh = output['wh']
        reg = output['reg']
        bbox, inds = bbox_decode(hm, wh, reg=reg, K=self.K)
        car_type = decode_by_ind(ftype, inds,
                                 K=self.K).detach().cpu().numpy()
        bbox = bbox.detach().cpu().numpy()
        # Column 9 of each detection carries the decoded type index.
        for i in range(bbox.shape[1]):
            bbox[0][i][9] = car_type[0][i]
        bbox = nms(bbox, 0.3)
        bbox = bbox_post_process(bbox.copy(), [meta['c'].cpu().numpy()],
                                 [meta['s']], meta['out_height'],
                                 meta['out_width'])
        res, Type = [], []
        # Keep detections whose confidence (column 8) clears 0.3.
        for box in bbox[0]:
            if box[8] > 0.3:
                res.append(box[0:8])
                Type.append(self.car_type[int(box[9])])
        return {OutputKeys.POLYGONS: np.array(res), OutputKeys.TEXT: Type}
5,601
absent strategy
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Scaleway Security Group Rule management module
#
# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com).
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: scaleway_security_group_rule
short_description: Scaleway Security Group Rule management module
author: Antoine Barbare (@abarbare)
description:
  - "This module manages Security Group Rule on Scaleway account U(https://developer.scaleway.com)."
extends_documentation_fragment:
  - community.general.scaleway
  - community.general.attributes
attributes:
  check_mode:
    support: full
  diff_mode:
    support: none
options:
  state:
    type: str
    description:
      - Indicate desired state of the Security Group Rule.
    default: present
    choices:
      - present
      - absent
  region:
    type: str
    description:
      - Scaleway region to use (for example V(par1)).
    required: true
    choices:
      - ams1
      - EMEA-NL-EVS
      - par1
      - EMEA-FR-PAR1
      - par2
      - EMEA-FR-PAR2
      - waw1
      - EMEA-PL-WAW1
  protocol:
    type: str
    description:
      - Network protocol to use.
    choices:
      - TCP
      - UDP
      - ICMP
    required: true
  port:
    description:
      - Port related to the rule, null value for all the ports.
    required: true
    type: int
  ip_range:
    type: str
    description:
      - IPV4 CIDR notation to apply to the rule.
    default: 0.0.0.0/0
  direction:
    type: str
    description:
      - Rule direction.
    choices:
      - inbound
      - outbound
    required: true
  action:
    type: str
    description:
      - Rule action.
    choices:
      - accept
      - drop
    required: true
  security_group:
    type: str
    description:
      - Security Group unique identifier.
    required: true
'''

EXAMPLES = '''
- name: Create a Security Group Rule
  community.general.scaleway_security_group_rule:
    state: present
    region: par1
    protocol: TCP
    port: 80
    ip_range: 0.0.0.0/0
    direction: inbound
    action: accept
    security_group: b57210ee-1281-4820-a6db-329f78596ecb
  register: security_group_rule_creation_task
'''

RETURN = '''
data:
    description: This is only present when O(state=present).
    returned: when O(state=present)
    type: dict
    sample: {
      "scaleway_security_group_rule": {
        "direction": "inbound",
        "protocol": "TCP",
        "ip_range": "0.0.0.0/0",
        "dest_port_from": 80,
        "action": "accept",
        "position": 2,
        "dest_port_to": null,
        "editable": null,
        "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9"
      }
    }
'''

from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object
from ansible.module_utils.basic import AnsibleModule


def get_sgr_from_api(security_group_rules, security_group_rule):
    """ Check if a security_group_rule specs are present in security_group_rules
        Return None if no rules match the specs
        Return the rule if found
    """
    # A rule is "the same" when all of these attributes agree.
    matched_fields = ('ip_range', 'dest_port_from', 'direction',
                      'action', 'protocol')
    for candidate in security_group_rules:
        if all(candidate[field] == security_group_rule[field]
               for field in matched_fields):
            return candidate
    return None


def present_strategy(api, security_group_id, security_group_rule):
    """Ensure the given rule exists; create it when missing."""
    ret = {'changed': False}

    response = api.get('security_groups/%s/rules' % security_group_id)
    if not response.ok:
        api.module.fail_json(
            msg='Error getting security group rules "%s": "%s" (%s)'
            % (response.info['msg'], response.json['message'], response.json))

    existing_rule = get_sgr_from_api(
        response.json['rules'], security_group_rule)

    if existing_rule:
        ret['scaleway_security_group_rule'] = existing_rule
        return ret

    ret['changed'] = True
    if api.module.check_mode:
        return ret

    # Create Security Group Rule
    response = api.post('/security_groups/%s/rules' % security_group_id,
                        data=payload_from_object(security_group_rule))
    if not response.ok:
        api.module.fail_json(
            msg='Error during security group rule creation: "%s": "%s" (%s)'
            % (response.info['msg'], response.json['message'], response.json))
    ret['scaleway_security_group_rule'] = response.json['rule']

    return ret


def METHOD_NAME(api, security_group_id, security_group_rule):
    """Ensure the given rule is absent; delete it when present."""
    ret = {'changed': False}

    response = api.get('security_groups/%s/rules' % security_group_id)
    if not response.ok:
        api.module.fail_json(
            msg='Error getting security group rules "%s": "%s" (%s)'
            % (response.info['msg'], response.json['message'], response.json))

    existing_rule = get_sgr_from_api(
        response.json['rules'], security_group_rule)
    if not existing_rule:
        return ret

    ret['changed'] = True
    if api.module.check_mode:
        return ret

    response = api.delete(
        '/security_groups/%s/rules/%s'
        % (security_group_id, existing_rule['id']))
    if not response.ok:
        api.module.fail_json(
            msg='Error deleting security group rule "%s": "%s" (%s)'
            % (response.info['msg'], response.json['message'], response.json))

    return ret


def core(module):
    """Dispatch to the present/absent strategy based on module params."""
    api = Scaleway(module=module)

    security_group_rule = {
        'protocol': module.params['protocol'],
        'dest_port_from': module.params['port'],
        'ip_range': module.params['ip_range'],
        'direction': module.params['direction'],
        'action': module.params['action'],
    }

    region = module.params['region']
    module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']

    if module.params['state'] == 'present':
        strategy = present_strategy
    else:
        strategy = METHOD_NAME
    summary = strategy(
        api=api,
        security_group_id=module.params['security_group'],
        security_group_rule=security_group_rule)
    module.exit_json(**summary)


def main():
    argument_spec = scaleway_argument_spec()
    argument_spec.update(
        state=dict(type='str', default='present',
                   choices=['absent', 'present']),
        region=dict(type='str', required=True,
                    choices=list(SCALEWAY_LOCATION.keys())),
        protocol=dict(type='str', required=True,
                      choices=['TCP', 'UDP', 'ICMP']),
        port=dict(type='int', required=True),
        ip_range=dict(type='str', default='0.0.0.0/0'),
        direction=dict(type='str', required=True,
                       choices=['inbound', 'outbound']),
        action=dict(type='str', required=True,
                    choices=['accept', 'drop']),
        security_group=dict(type='str', required=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    core(module)


if __name__ == '__main__':
    main()
5,602
info check
import re

from virttest import error_context
from virttest import utils_misc


@error_context.context_aware
def run(test, params, env):
    """
    KVM Seabios test:
    1) Start guest with sga bios
    2) Check the sga bios messages(optional)
    3) Restart the vm, verify it's reset(optional)
    4) Display and check the boot menu order
    5) Start guest from the specified boot entry
    6) Log into the guest to verify it's up

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_output(session_obj):
        """
        Use the function to short the lines in the scripts
        """
        # sgabios strips terminal escapes, so read the stripped output.
        if params["enable_sga"] == "yes":
            output = session_obj.get_stripped_output()
        else:
            output = session_obj.get_output()
        return output

    def boot_menu():
        # True once the boot-menu hint appears at least once.
        return re.search(boot_menu_hint, get_output(seabios_session))

    def boot_menu_check():
        # After reboot the hint must appear a second time.
        return (len(re.findall(boot_menu_hint,
                               get_output(seabios_session))) > 1)

    error_context.context("Start guest with sga bios", test.log.info)
    vm = env.get_vm(params["main_vm"])
    # Since the seabios is displayed in the beginning of guest boot,
    # booting guest here so that we can check all of sgabios/seabios
    # info, especially the correct time of sending boot menu key.
    vm.create()
    timeout = float(params.get("login_timeout", 240))
    boot_menu_key = params.get("boot_menu_key", 'esc')
    restart_key = params.get("restart_key")
    boot_menu_hint = params.get("boot_menu_hint")
    boot_device = params.get("boot_device", "")
    sgabios_info = params.get("sgabios_info")
    seabios_session = vm.logsessions['seabios']

    if sgabios_info:
        error_context.context("Display and check the SGABIOS info",
                              test.log.info)

        def METHOD_NAME():
            return re.search(sgabios_info, get_output(vm.serial_console))

        if not utils_misc.wait_for(METHOD_NAME, timeout, 1):
            # Fixed typo in the failure message ("Cound" -> "Could").
            err_msg = "Could not get sgabios message. The output"
            err_msg += " is %s" % get_output(vm.serial_console)
            test.fail(err_msg)

    if not (boot_menu_hint and utils_misc.wait_for(boot_menu, timeout, 1)):
        test.fail("Could not get boot menu message.")

    if restart_key:
        error_context.context("Restart vm and check it's ok", test.log.info)
        # Count occurrences of the first seabios line; a reboot replays it.
        seabios_text = get_output(seabios_session)
        headline = seabios_text.split("\n")[0] + "\n"
        headline_count = seabios_text.count(headline)
        vm.send_key(restart_key)

        def reboot_check():
            return get_output(seabios_session).count(headline) > headline_count

        if not utils_misc.wait_for(reboot_check, timeout, 1):
            test.fail("Could not restart the vm")
        if not (boot_menu_hint and utils_misc.wait_for(boot_menu_check,
                                                       timeout, 1)):
            test.fail("Could not get boot menu message after rebooting")

    # Send boot menu key in monitor.
    vm.send_key(boot_menu_key)
    error_context.context("Display and check the boot menu order",
                          test.log.info)

    def get_list():
        # Menu entries look like "1. <description>".
        return re.findall(r"^\d+\. (.*)\s", get_output(seabios_session), re.M)

    boot_list = utils_misc.wait_for(get_list, timeout, 1)
    if not boot_list:
        test.fail("Could not get boot entries list.")
    test.log.info("Got boot menu entries: '%s'", boot_list)

    for i, v in enumerate(boot_list, start=1):
        if re.search(boot_device, v, re.I):
            error_context.context("Start guest from boot entry '%s'" % v,
                                  test.log.info)
            vm.send_key(str(i))
            break
    else:
        test.fail("Could not get any boot entry match "
                  "pattern '%s'" % boot_device)

    # Pass the logger for consistency with every other context() call above.
    error_context.context("Log into the guest to verify it's up",
                          test.log.info)
    session = vm.wait_for_login(timeout=timeout)
    session.close()
5,603
import texture source
"""This script contains helper methods for texture pathing.""" # ***** BEGIN LICENSE BLOCK ***** # # Copyright © 2020, NIF File Format Library and Tools contributors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # * Neither the name of the NIF File Format Library and Tools # project nor the names of its contributors may be used to endorse # or promote products derived from this software without specific # prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
#
# ***** END LICENSE BLOCK *****

from functools import reduce
import operator
import traceback
import os.path

import bpy

from generated.formats.nif import classes as NifClasses

from io_scene_niftools.utils.singleton import NifOp
from io_scene_niftools.utils.logging import NifLog


class TextureLoader:
    """Resolves NiSourceTexture blocks (or plain path strings) to Blender
    Image datablocks, searching common texture directories and saving
    embedded textures to disk as needed."""

    # Paths of embedded textures already saved this run, so each is
    # written to disk at most once.
    external_textures = set()

    @staticmethod
    def load_image(tex_path):
        """Returns an image or a generated image if none was found"""
        name = os.path.basename(tex_path)
        if name not in bpy.data.images:
            try:
                b_image = bpy.data.images.load(tex_path)
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; bpy load failures raise RuntimeError-family
            # exceptions which Exception covers.
            except Exception:
                NifLog.warn(f"Texture '{name}' not found or not supported and no alternate available")
                # Fall back to a 1x1 placeholder so materials still link up.
                b_image = bpy.data.images.new(name=name, width=1, height=1, alpha=True)
                b_image.filepath = tex_path
        else:
            b_image = bpy.data.images[name]
        return b_image

    def METHOD_NAME(self, source):
        """Convert a NiSourceTexture block, or simply a path string, to a Blender Texture object.

        :return Texture object
        """
        # if the source block is not linked then return None
        if not source:
            return None
        if isinstance(source, NifClasses.NiSourceTexture) and not source.use_external and NifOp.props.use_embedded_texture:
            return self.import_embedded_texture_source(source)
        else:
            return self.import_external_source(source)

    def import_embedded_texture_source(self, source):
        """Save the pixel data embedded in `source` as a dds file (once per
        run) and load it as a Blender image."""
        # first try to use the actual file name of this NiSourceTexture
        tex_name = source.file_name
        tex_path = os.path.join(os.path.dirname(NifOp.props.filepath), tex_name)
        # not set, then use generated sequence name
        if not tex_name:
            tex_path = self.generate_image_name()
        # only save them once per run, obviously only useful if file_name was set
        if tex_path not in self.external_textures:
            # save embedded texture as dds file
            with open(tex_path, "wb") as stream:
                try:
                    NifLog.info(f"Saving embedded texture as {tex_path}")
                    source.pixel_data.save_as_dds(stream)
                except ValueError:
                    # Unsupported pixel format: warn but still register the
                    # path so we do not retry every time.
                    NifLog.warn(f"Pixel format not supported in embedded texture {tex_path}!")
                    traceback.print_exc()
            self.external_textures.add(tex_path)
        return self.load_image(tex_path)

    @staticmethod
    def generate_image_name():
        """Find a file name (but avoid overwriting)"""
        n = 0
        while n < 10000:
            fn = f"image{n:0>4d}.dds"
            tex = os.path.join(os.path.dirname(NifOp.props.filepath), fn)
            if not os.path.exists(tex):
                break
            n += 1
        return tex

    def import_external_source(self, source):
        """Locate the image file referenced by `source` on disk, trying
        several search directories, extensions and case variants."""
        # the texture uses an external image file
        if isinstance(source, NifClasses.NiSourceTexture):
            fn = source.file_name
        elif isinstance(source, str):
            fn = source
        else:
            raise TypeError("source must be NiSourceTexture or str")
        fn = fn.replace('\\', os.sep)
        fn = fn.replace('/', os.sep)

        # go searching for it
        import_path = os.path.dirname(NifOp.props.filepath)
        search_path_list = [import_path]
        if bpy.context.preferences.filepaths.texture_directory:
            search_path_list.append(bpy.context.preferences.filepaths.texture_directory)

        # TODO [general][path] Implement full texture path finding.
        nif_dir = os.path.join(os.getcwd(), 'nif')
        search_path_list.append(nif_dir)

        # if it looks like a Morrowind style path, use common sense to guess texture path
        meshes_index = import_path.lower().find("meshes")
        if meshes_index != -1:
            search_path_list.append(import_path[:meshes_index] + 'textures')

        # if it looks like a Civilization IV style path, use common sense to guess texture path
        art_index = import_path.lower().find("art")
        if art_index != -1:
            search_path_list.append(import_path[:art_index] + 'shared')

        # go through all texture search paths
        for texdir in search_path_list:
            if texdir[0:2] == "//":
                # Blender-specific directory, slows down resolve_ncase:
                relative = True
                texdir = texdir[2:]
            else:
                relative = False
            texdir = texdir.replace('\\', os.sep)
            texdir = texdir.replace('/', os.sep)
            # go through all possible file names, try alternate extensions too; for linux, also try lower case versions of filenames
            texfns = reduce(operator.add,
                            [[fn[:-4] + ext, fn[:-4].lower() + ext]
                             for ext in ('.DDS', '.dds', '.PNG', '.png',
                                         '.TGA', '.tga', '.BMP', '.bmp',
                                         '.JPG', '.jpg')])
            texfns = [fn, fn.lower()] + list(set(texfns))
            for texfn in texfns:
                # now a little trick, to satisfy many Morrowind mods
                if texfn[:9].lower() == 'textures' + os.sep and texdir[-9:].lower() == os.sep + 'textures':
                    # strip one of the two 'textures' from the path
                    tex = os.path.join(texdir[:-9], texfn)
                else:
                    tex = os.path.join(texdir, texfn)
                # "ignore case" on linux
                if relative:
                    tex = bpy.path.abspath("//" + tex)
                tex = bpy.path.resolve_ncase(tex)
                NifLog.debug(f"Searching {tex}")
                if os.path.exists(tex):
                    if relative:
                        return self.load_image(bpy.path.relpath(tex))
                    else:
                        return self.load_image(tex)
        else:
            tex = fn

        # probably not found, but load a dummy regardless
        return self.load_image(tex)
5,604
require
__all__ = ['require']

import subprocess, os, codecs, glob
from .evaljs import translate_js, DEFAULT_HEADER
from .translators.friendly_nodes import is_valid_py_name
import six
import tempfile
import hashlib
import random

DID_INIT = False
DIRNAME = tempfile.mkdtemp()
PY_NODE_MODULES_PATH = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), 'py_node_modules')


def _init():
    """Verify node is available and install the babel/browserify toolchain
    into the scratch directory (once per process)."""
    global DID_INIT
    if DID_INIT:
        return
    # NOTE(review): these subprocess calls build shell strings with
    # shell=True; DIRNAME comes from tempfile so it is trusted, but any
    # future user-supplied path here would be a shell-injection risk.
    assert subprocess.call(
        'node -v', shell=True,
        cwd=DIRNAME) == 0, 'You must have node installed! run: brew install node'
    assert subprocess.call(
        'cd %s;npm install babel-core babel-cli babel-preset-es2015 babel-polyfill babelify browserify browserify-shim'
        % repr(DIRNAME),
        shell=True,
        cwd=DIRNAME) == 0, 'Could not link required node_modules'
    DID_INIT = True


ADD_TO_GLOBALS_FUNC = '''
;function addToGlobals(name, obj) {
    if (!Object.prototype.hasOwnProperty('_fake_exports')) {
        Object.prototype._fake_exports = {};
    }
    Object.prototype._fake_exports[name] = obj;
};
'''
# subprocess.call("""node -e 'require("browserify")'""", shell=True)
GET_FROM_GLOBALS_FUNC = '''
;function getFromGlobals(name) {
    if (!Object.prototype.hasOwnProperty('_fake_exports')) {
        throw Error("Could not find any value named "+name);
    }
    if (Object.prototype._fake_exports.hasOwnProperty(name)) {
        return Object.prototype._fake_exports[name];
    } else {
        throw Error("Could not find any value named "+name);
    }
};
'''


def _get_module_py_name(module_name):
    """npm names may contain '-', which is not valid in Python names."""
    return module_name.replace('-', '_')


def _get_module_var_name(module_name):
    """Derive the Python variable name that will hold the translated module."""
    cand = _get_module_py_name(module_name).rpartition('/')[-1]
    if not is_valid_py_name(cand):
        raise ValueError(
            "Invalid Python module name %s (generated from %s). Unsupported/invalid npm module specification?" % (
                repr(cand), repr(module_name)))
    return cand


def _split_module_version(module_name):
    """Split 'name[@version]' into (name, version).

    Splits only after the first character so scoped npm packages such as
    '@scope/name@1.2' keep their leading '@' (the old
    ``(module_name+"@@@").split('@')`` approach produced an empty name
    for scoped packages).
    """
    if module_name.startswith('@'):
        base, _, maybe_version = module_name[1:].partition('@')
        return '@' + base, maybe_version
    base, _, maybe_version = module_name.partition('@')
    return base, maybe_version


def _get_and_translate_npm_module(module_name, include_polyfill=False, update=False,
                                  maybe_version_str=""):
    """Install `module_name` via npm, bundle it with browserify, transpile
    with babel to ES5 and translate the bundle to Python. Returns the
    translated Python source (cached on disk unless `update`)."""
    assert isinstance(module_name, str), 'module_name must be a string!'

    py_name = _get_module_py_name(module_name)
    module_filename = '%s.py' % py_name
    var_name = _get_module_var_name(module_name)
    if not os.path.exists(os.path.join(PY_NODE_MODULES_PATH,
                                       module_filename)) or update:
        _init()
        # Unique in/out names so concurrent translations do not collide.
        module_hash = hashlib.sha1(module_name.encode("utf-8")).hexdigest()[:15]
        version = random.randrange(10000000000000)
        in_file_name = 'in_%s_%d.js' % (module_hash, version)
        out_file_name = 'out_%s_%d.js' % (module_hash, version)

        code = ADD_TO_GLOBALS_FUNC
        if include_polyfill:
            code += "\n;require('babel-polyfill');\n"
        code += """
        var module_temp_love_python = require(%s);
        addToGlobals(%s, module_temp_love_python);
        """ % (repr(module_name), repr(module_name))
        with open(os.path.join(DIRNAME, in_file_name), 'wb') as f:
            f.write(code.encode('utf-8') if six.PY3 else code)

        pkg_name = module_name.partition('/')[0]
        if maybe_version_str:
            pkg_name += '@' + maybe_version_str
        # make sure the module is installed
        assert subprocess.call(
            'cd %s;npm install %s' % (repr(DIRNAME), pkg_name),
            shell=True,
            cwd=DIRNAME) == 0, 'Could not install the required module: ' + pkg_name

        # convert the module
        assert subprocess.call(
            '''node -e "(require('browserify')('./%s').bundle(function (err,data) {if (err) {console.log(err);throw new Error(err);};fs.writeFile('%s', require('babel-core').transform(data, {'presets': require('babel-preset-es2015')}).code, ()=>{});}))"'''
            % (in_file_name, out_file_name),
            shell=True,
            cwd=DIRNAME,
        ) == 0, 'Error when converting module to the js bundle'

        os.remove(os.path.join(DIRNAME, in_file_name))
        with codecs.open(os.path.join(DIRNAME, out_file_name), "r",
                         "utf-8") as f:
            js_code = f.read()
        print("Bundled JS library dumped at: %s" % os.path.join(
            DIRNAME, out_file_name))
        if len(js_code) < 50:
            raise RuntimeError(
                "Candidate JS bundle too short - likely browserify issue.")
        js_code += GET_FROM_GLOBALS_FUNC
        js_code += ';var %s = getFromGlobals(%s);%s' % (
            var_name, repr(module_name), var_name)
        print('Please wait, translating...')
        py_code = translate_js(js_code)

        dirname = os.path.dirname(
            os.path.join(PY_NODE_MODULES_PATH, module_filename))
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        with open(os.path.join(PY_NODE_MODULES_PATH, module_filename),
                  'wb') as f:
            f.write(py_code.encode('utf-8') if six.PY3 else py_code)
    else:
        with codecs.open(
                os.path.join(PY_NODE_MODULES_PATH, module_filename), "r",
                "utf-8") as f:
            py_code = f.read()
    return py_code


def METHOD_NAME(module_name, include_polyfill=True, update=False, context=None):
    """
    Installs the provided npm module, exports a js bundle via browserify, converts to ECMA 5.1 via babel and
    finally translates the generated JS bundle to Python via Js2Py.
    Returns a pure python object that behaves like the installed module. Nice!

    :param module_name: Name of the npm module to require. For example 'esprima'. Supports specific versions via @
        specification. Eg: 'crypto-js@3.3'.
    :param include_polyfill: Whether the babel-polyfill should be included as part of the translation. May be needed
        for some modules that use unsupported features of JS6 such as Map or typed arrays.
    :param update: Whether to force update the translation. Otherwise uses a cached version if exists.
    :param context: Optional context in which the translated module should be executed in. If provided, the
        header (js2py imports) will be skipped as it is assumed that the context already has all the necessary imports.
    :return: The JsObjectWrapper containing the translated module object. Can be used like a standard python object.
    """
    module_name, maybe_version = _split_module_version(module_name)
    py_code = _get_and_translate_npm_module(
        module_name,
        include_polyfill=include_polyfill,
        update=update,
        maybe_version_str=maybe_version)
    # this is a bit hacky but we need to strip the default header from the generated code...
    if context is not None:
        if not py_code.startswith(DEFAULT_HEADER):
            # new header version? retranslate...
            assert not update, "Unexpected header."
            # Forward the version spec on retranslation too (it was
            # previously dropped, silently retranslating "latest").
            py_code = _get_and_translate_npm_module(
                module_name,
                include_polyfill=include_polyfill,
                update=True,
                maybe_version_str=maybe_version)
        assert py_code.startswith(DEFAULT_HEADER), "Unexpected header."
        py_code = py_code[len(DEFAULT_HEADER):]
    context = {} if context is None else context
    exec(py_code, context)
    return context['var'][_get_module_var_name(module_name)].to_py()
5,605
set env vars
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
import os
from base64 import urlsafe_b64decode, urlsafe_b64encode

from six import iteritems

DDTRACE_OPTIONS_LIST = [
    'DD_TAGS',
    'DD_TRACE*',
    'DD_PROFILING*',
    'DD_SERVICE',
    'DD_AGENT_HOST',
    'DD_ENV',
]
E2E_PREFIX = 'DDEV_E2E'
E2E_ENV_VAR_PREFIX = '{}_ENV_'.format(E2E_PREFIX)
E2E_SET_UP = '{}_UP'.format(E2E_PREFIX)
E2E_TEAR_DOWN = '{}_DOWN'.format(E2E_PREFIX)
E2E_PARENT_PYTHON = '{}_PYTHON_PATH'.format(E2E_PREFIX)
E2E_FIXTURE_NAME = 'dd_environment'
TESTING_PLUGIN = 'DDEV_TESTING_PLUGIN'
SKIP_ENVIRONMENT = 'DDEV_SKIP_ENV'

JMX_TO_INAPP_TYPES = {
    'counter': 'gauge',  # JMX counter -> DSD gauge -> in-app gauge
    'rate': 'gauge',  # JMX rate -> DSD gauge -> in-app gauge
    'monotonic_count': 'rate',  # JMX monotonic_count -> DSD count -> in-app rate
    # TODO: Support JMX histogram
    # JMX histogram -> DSD histogram -> multiple in-app metrics (max, median, avg, count)
}

EVENT_PLATFORM_EVENT_TYPES = [
    'dbm-samples',
    'dbm-metrics',
    'dbm-activity',
    'network-devices-metadata',
]


def e2e_active():
    """True when any E2E marker variable (or any E2E-managed env var) is set."""
    markers = (E2E_SET_UP, E2E_TEAR_DOWN, E2E_PARENT_PYTHON)
    if any(marker in os.environ for marker in markers):
        return True
    return any(name.startswith(E2E_ENV_VAR_PREFIX) for name in os.environ)


def e2e_testing():
    """True when running inside an E2E test process."""
    return E2E_PARENT_PYTHON in os.environ


def METHOD_NAME(env_vars):
    """Export each entry of `env_vars` under the E2E namespace prefix."""
    for name, value in iteritems(env_vars):
        os.environ['{}{}'.format(E2E_ENV_VAR_PREFIX, name)] = value


def remove_env_vars(env_vars):
    """Remove the namespaced form of each variable name, ignoring absentees."""
    for name in env_vars:
        os.environ.pop('{}{}'.format(E2E_ENV_VAR_PREFIX, name), None)


def get_env_vars(raw=False):
    """Return the E2E-managed environment variables.

    With raw=True the full (prefixed) names are kept; otherwise the prefix
    is stripped and names are lowercased.
    """
    if raw:
        return {
            name: value
            for name, value in iteritems(os.environ)
            if name.startswith(E2E_ENV_VAR_PREFIX)
        }

    env_vars = {}
    for name, value in iteritems(os.environ):
        _, found, trimmed = name.partition(E2E_ENV_VAR_PREFIX)
        if found:
            # Normalize casing for Windows
            env_vars[trimmed.lower()] = value
    return env_vars


def get_state(key, default=None):
    """Fetch and deserialize a stored state value, or `default` if unset."""
    stored = get_env_vars().get(key.lower())
    return default if stored is None else deserialize_data(stored)


def save_state(key, value):
    """Serialize `value` and store it under the namespaced `key`."""
    METHOD_NAME({key.lower(): serialize_data(value)})


def set_up_env():
    """Environment setup is enabled unless explicitly disabled."""
    return os.getenv(E2E_SET_UP, 'true') != 'false'


def tear_down_env():
    """Environment teardown is enabled unless explicitly disabled."""
    return os.getenv(E2E_TEAR_DOWN, 'true') != 'false'


def format_config(config):
    """Normalize a check config to the {'init_config': ..., 'instances': [...]} shape."""
    if 'instances' not in config:
        config = {'instances': [config]}

    # Agent 5 requires init_config
    if 'init_config' not in config:
        config = dict(init_config={}, **config)

    return config


def replay_check_run(agent_collector, stub_aggregator, stub_agent):
    """Replay collector payloads from the Agent into the test stubs.

    Raises a single aggregated Exception if any check run reported errors.
    """
    errors = []

    for payload in agent_collector:
        aggregator = payload['aggregator']
        inventories = payload.get('inventories')
        runner = payload.get('runner', {})
        check_id = runner.get('CheckID', '')
        check_name = runner.get('CheckName', '')

        if inventories:
            for metadata in inventories.values():
                for meta_key, meta_val in metadata.items():
                    stub_agent.set_check_metadata(check_name, meta_key, meta_val)

        for metric in aggregator.get('metrics', []):
            for _, value in metric['points']:
                raw_metric_type = metric['type']
                # JMX metric types map differently to in-app types.
                if metric.get('source_type_name') == 'JMX':
                    raw_metric_type = JMX_TO_INAPP_TYPES.get(raw_metric_type, raw_metric_type)
                metric_type = stub_aggregator.METRIC_ENUM_MAP[raw_metric_type]
                stub_aggregator.submit_metric_e2e(
                    # device is only present when replaying e2e tests. In integration tests it will be a tag
                    check_name,
                    check_id,
                    metric_type,
                    metric['metric'],
                    value,
                    metric['tags'],
                    metric['host'],
                    metric.get('device'),
                )

        for ep_event_type in EVENT_PLATFORM_EVENT_TYPES:
            for event in aggregator.get(ep_event_type) or []:
                stub_aggregator.submit_event_platform_event(
                    check_name,
                    check_id,
                    json.dumps(event['UnmarshalledEvent']),
                    event['EventType'],
                )

        for sc in aggregator.get('service_checks', []):
            stub_aggregator.submit_service_check(
                check_name, check_id, sc['check'], sc['status'], sc['tags'], sc['host_name'], sc['message']
            )

        if runner.get('LastError'):
            # LastError is usually a JSON list of error dicts; fall back to
            # wrapping the raw text when it is not valid JSON.
            try:
                new_errors = json.loads(runner['LastError'])
            except json.decoder.JSONDecodeError:
                new_errors = [
                    {
                        'message': str(runner['LastError']),
                        'traceback': '',
                    }
                ]
            errors.extend(new_errors)

    if errors:
        raise Exception("\n".join("Message: {}\n{}".format(err['message'], err['traceback']) for err in errors))


def serialize_data(data):
    """JSON-encode then base64-encode `data` for safe env-var transport."""
    data = json.dumps(data, separators=(',', ':'))
    # Using base64 ensures:
    # 1. Printing to stdout won't fail
    # 2. Easy parsing since there are no spaces
    #
    # TODO: Remove str() when we drop Python 2
    return str(urlsafe_b64encode(data.encode('utf-8')).decode('utf-8'))


def deserialize_data(data):
    """Inverse of serialize_data: base64-decode then JSON-decode."""
    decoded = urlsafe_b64decode(data.encode('utf-8'))
    return json.loads(decoded.decode('utf-8'))
5,606
dedupe
# Copyright (c) Meta Platforms, Inc. and affiliates.

import vpdq
import json
import typing as t
import pathlib
from dataclasses import dataclass

from threatexchange.signal_type.pdq.pdq_utils import PDQ_HEX_STR_LEN

QUALITY = "quality"
HASH = "hash"
TIMESTAMP = "timestamp"
VPDQ_TIMESTAMP_PRECISION = 3
VPDQ_QUALITY_THRESHOLD = 50
VPDQ_DISTANCE_THRESHOLD = 31
VPDQ_QUERY_MATCH_THRESHOLD_PERCENT = 80.0
VPDQ_INDEX_MATCH_THRESHOLD_PERCENT = 0.0


@dataclass
class VPDQMatchResult:
    """Data class for VPDQ match result"""

    query_match_percent: float = 0.0
    compared_match_percent: float = 0.0


@dataclass
class VpdqCompactFeature:
    """A VPDQ Feature with a subset of fields needed for matching"""

    pdq_hex: str
    quality: int
    timestamp: float

    def assert_valid(self) -> "VpdqCompactFeature":
        """Checks the bounds of all the elements, throws ValueError if invalid"""
        if len(self.pdq_hex) != PDQ_HEX_STR_LEN:
            raise ValueError("malformed pdq hash")
        int(self.pdq_hex, 16)  # For ValueError
        if not (0 <= self.quality <= 100):
            raise ValueError("invalid VPDQ quality")
        if self.timestamp < 0:
            raise ValueError("invalid timestamp")
        return self

    @classmethod
    def from_vpdq_feature(cls, feature: vpdq.VpdqFeature) -> "VpdqCompactFeature":
        return cls(feature.hex, feature.quality, feature.timestamp)

    @classmethod
    def from_str(cls, serialized: str) -> "VpdqCompactFeature":
        """Convert from a string back to the class - the inverse of __str__"""
        parts = serialized.split(",")
        try:
            pdq_hex, qual_str, time_str = parts  # Wrong count = ValueError
            return cls(pdq_hex, int(qual_str), float(time_str)).assert_valid()
        except ValueError:
            raise ValueError(f"invalid {cls.__name__} serialization: {serialized}")

    def __str__(self) -> str:
        # BUGFIX: use fixed-point ('f') formatting for the timestamp. A bare
        # precision (':.3') means 3 *significant digits*, which rendered
        # timestamps >= 1000 s in scientific notation and truncated them.
        return f"{self.pdq_hex},{self.quality},{self.timestamp:.{VPDQ_TIMESTAMP_PRECISION}f}"


def hash_file_compact(
    filepath: str, seconds_per_hash: float = 1.0
) -> t.List[VpdqCompactFeature]:
    """Wrapper around computeHash to instead return compact features"""
    vpdq_hashes = vpdq.computeHash(str(filepath), seconds_per_hash=seconds_per_hash)
    return [VpdqCompactFeature.from_vpdq_feature(f) for f in vpdq_hashes]


def vpdq_to_json(
    vpdq_features: t.List[VpdqCompactFeature], *, indent: t.Optional[int] = None
) -> str:
    """Convert from VPDQ features to json object and return the json object as a str"""
    return json.dumps([str(f.assert_valid()) for f in vpdq_features], indent=indent)


def json_to_vpdq(json_str: str) -> t.List[VpdqCompactFeature]:
    """Load a str as a json object and convert from json object to VPDQ features"""
    return [VpdqCompactFeature.from_str(s) for s in json.loads(json_str or "[]")]


def METHOD_NAME(features: t.List[VpdqCompactFeature]) -> t.List[VpdqCompactFeature]:
    """Filter out the VPDQ feature with exact same hash in a list of VPDQ features

    Args:
        features

    Returns:
        List of VPDQ Features with unique features
    """
    unique_features = set()
    ret = []
    for h in features:
        # First occurrence of each pdq hash wins; later duplicates dropped.
        if h.pdq_hex not in unique_features:
            ret.append(h)
            unique_features.add(h.pdq_hex)
    return ret


def quality_filter(
    features: t.List[VpdqCompactFeature], quality_tolerance: int
) -> t.List[VpdqCompactFeature]:
    """Filter VPDQ feature that has a quality lower than quality_tolerance

    Args:
        features
        quality_tolerance : If frames is this quality level then it will be ignored

    Returns:
        List of VPDQFeatures with quality higher than quality_tolerance
    """
    return [f for f in features if f.quality >= quality_tolerance]


def OLD_json_to_vpdq(json_str: str) -> t.List[vpdq.VpdqFeature]:
    """Load a str as a json object and convert from json object to VPDQ features"""
    if not json_str:
        return []
    features = []
    # VPDQ feature's timestamp is round to 3 decimals
    vpdq_json = json.loads(
        json_str, parse_float=lambda x: round(float(x), VPDQ_TIMESTAMP_PRECISION)
    )
    for frame_number, feature in vpdq_json.items():
        features.append(
            vpdq.VpdqFeature(
                feature[QUALITY], int(frame_number), feature[HASH], feature[TIMESTAMP]
            )
        )
    return features


def OLD_read_file_to_hash(
    input_hash_filename: t.Union[str, pathlib.Path]
) -> t.List[vpdq.VpdqFeature]:
    # Annotation fixed: OLD_json_to_vpdq returns raw vpdq.VpdqFeature
    # objects, not VpdqCompactFeature.
    """Read hash file and return list of VPDQ features

    Args:
        Input hash file path

    Returns:
        VPDQ features from the hash file"""
    with open(input_hash_filename, "r") as file:
        return OLD_json_to_vpdq(file.read())


def OLD_dump_hash_to_file(
    output_hash_filename: t.Union[str, pathlib.Path],
    vpdq_features: t.List[VpdqCompactFeature],
) -> None:
    """Write list of VPDQ features to output hash file

    Args:
        Output hash file path
        VPDQ features write to the output file"""
    with open(output_hash_filename, "w") as file:
        file.write(vpdq_to_json(vpdq_features))


def prepare_vpdq_feature(
    signal_str: str, quality_tolerance: int
) -> t.List[VpdqCompactFeature]:
    """Convert signal_str to deduped and quality-filtered vdqp features

    Args:
        quality_tolerance : The quality tolerance of VPDQ Feature.
            If VPDQ Feature is below this quality level then it will not be added
    """
    features = json_to_vpdq(signal_str)
    return METHOD_NAME(quality_filter(features, quality_tolerance))
5,607
update
# Copyright (C) 2021-2022 Intel Corporation # Copyright (C) 2023 CVAT.ai Corporation # # SPDX-License-Identifier: MIT import os from tempfile import TemporaryDirectory import rq from typing import Any, Callable, List, Mapping, Tuple from datumaro.components.errors import DatasetError, DatasetImportError, DatasetNotFoundError from django.db import transaction from cvat.apps.engine import models from cvat.apps.engine.serializers import DataSerializer, TaskWriteSerializer from cvat.apps.engine.task import _create_thread as create_task from cvat.apps.dataset_manager.task import TaskAnnotation from .annotation import AnnotationIR from .bindings import ProjectData, load_dataset_data, CvatImportError from .formats.registry import make_exporter, make_importer def export_project(project_id, dst_file, format_name, server_url=None, save_images=False): # For big tasks dump function may run for a long time and # we dont need to acquire lock after the task has been initialized from DB. # But there is the bug with corrupted dump file in case 2 or # more dump request received at the same time: # https://github.com/opencv/cvat/issues/217 with transaction.atomic(): project = ProjectAnnotationAndData(project_id) project.init_from_db() exporter = make_exporter(format_name) with open(dst_file, 'wb') as f: project.export(f, exporter, host=server_url, save_images=save_images) class ProjectAnnotationAndData: def __init__(self, pk: int): self.db_project = models.Project.objects.get(id=pk) self.db_tasks = models.Task.objects.filter(project__id=pk).exclude(data=None).order_by('id') self.task_annotations: dict[int, TaskAnnotation] = dict() self.annotation_irs: dict[int, AnnotationIR] = dict() self.tasks_to_add: list[models.Task] = [] def reset(self): for annotation_ir in self.annotation_irs.values(): annotation_ir.reset() def put(self, tasks_data: Mapping[int,Any]): for task_id, data in tasks_data.items(): self.task_annotations[task_id].put(data) def create(self, tasks_data: 
Mapping[int,Any]): for task_id, data in tasks_data.items(): self.task_annotations[task_id].create(data) def METHOD_NAME(self, tasks_data: Mapping[int,Any]): for task_id, data in tasks_data.items(): self.task_annotations[task_id].METHOD_NAME(data) def delete(self, tasks_data: Mapping[int,Any]=None): if tasks_data is not None: for task_id, data in tasks_data.items(): self.task_annotations[task_id].put(data) else: for task_annotation in self.task_annotations.values(): task_annotation.delete() def add_task(self, task_fields: dict, files: dict, project_data: ProjectData = None): def split_name(file): _, name = file.split(files['data_root']) return name data_serializer = DataSerializer(data={ "server_files": files['media'], #TODO: following fields should be replaced with proper input values from request in future "use_cache": False, "use_zip_chunks": True, "image_quality": 70, }) data_serializer.is_valid(raise_exception=True) db_data = data_serializer.save() db_task = TaskWriteSerializer.create(None, { **task_fields, 'data_id': db_data.id, 'project_id': self.db_project.id }) data = {k:v for k, v in data_serializer.data.items()} data['use_zip_chunks'] = data_serializer.validated_data['use_zip_chunks'] data['use_cache'] = data_serializer.validated_data['use_cache'] data['copy_data'] = data_serializer.validated_data['copy_data'] data['server_files_path'] = files['data_root'] data['stop_frame'] = None data['server_files'] = list(map(split_name, data['server_files'])) create_task(db_task, data, isDatasetImport=True) self.db_tasks = models.Task.objects.filter(project__id=self.db_project.id).exclude(data=None).order_by('id') self.init_from_db() if project_data is not None: project_data.new_tasks.add(db_task.id) project_data.init() def add_labels(self, labels: List[models.Label], attributes: List[Tuple[str, models.AttributeSpec]] = None): for label in labels: label.project = self.db_project # We need label_id here, so we can't use bulk_create here label.save() for label_name, 
attribute in attributes or []: label, = filter(lambda l: l.name == label_name, labels) attribute.label = label if attributes: models.AttributeSpec.objects.bulk_create([a[1] for a in attributes]) def init_from_db(self): self.reset() for task in self.db_tasks: annotation = TaskAnnotation(pk=task.id) annotation.init_from_db() self.task_annotations[task.id] = annotation self.annotation_irs[task.id] = annotation.ir_data def export(self, dst_file: str, exporter: Callable, host: str='', **options): project_data = ProjectData( annotation_irs=self.annotation_irs, db_project=self.db_project, host=host ) temp_dir_base = self.db_project.get_tmp_dirname() os.makedirs(temp_dir_base, exist_ok=True) with TemporaryDirectory(dir=temp_dir_base) as temp_dir: exporter(dst_file, temp_dir, project_data, **options) def load_dataset_data(self, *args, **kwargs): load_dataset_data(self, *args, **kwargs) def import_dataset(self, dataset_file, importer, **options): project_data = ProjectData( annotation_irs=self.annotation_irs, db_project=self.db_project, task_annotations=self.task_annotations, project_annotation=self, ) project_data.soft_attribute_import = True temp_dir_base = self.db_project.get_tmp_dirname() os.makedirs(temp_dir_base, exist_ok=True) with TemporaryDirectory(dir=temp_dir_base) as temp_dir: importer(dataset_file, temp_dir, project_data, self.load_dataset_data, **options) self.create({tid: ir.serialize() for tid, ir in self.annotation_irs.items() if tid in project_data.new_tasks}) @property def data(self) -> dict: raise NotImplementedError() @transaction.atomic def import_dataset_as_project(src_file, project_id, format_name, conv_mask_to_poly): rq_job = rq.get_current_job() rq_job.meta['status'] = 'Dataset import has been started...' rq_job.meta['progress'] = 0. 
rq_job.save_meta() project = ProjectAnnotationAndData(project_id) project.init_from_db() importer = make_importer(format_name) with open(src_file, 'rb') as f: try: project.import_dataset(f, importer, conv_mask_to_poly=conv_mask_to_poly) except (DatasetError, DatasetImportError, DatasetNotFoundError) as ex: raise CvatImportError(str(ex))
5,608
eval basic multisum identity
import contextlib import concurrent.futures as futures from itertools import chain import json import os from tempfile import gettempdir import pytest from stress_tests.experiment import ( ExperimentConditions, Experiment, Encoder, Replication ) from stress_tests import read_mlir from stress_tests.utils import CONCRETECOMPILER, run from stress_tests.v0_parameters import P_MAX, LOG2_MANP_MAX POSSIBLE_BITWIDTH = range(1, P_MAX+1) POSSIBLE_SIZE = range(1, 128) TEST_PATH = os.path.dirname(__file__) TRACE = os.path.join(TEST_PATH, 'trace') JIT_INVOKE_MAIN = ( '--action=jit-invoke', '--funcname=main', '--jit-keyset-cache-path=/tmp/StresstestsCache', ) def jit_args(*params): return tuple( f'--jit-args={p}' for p in params ) CONTROLLED_CODE_PARAMS = sorted(chain.from_iterable( { #(bitwidth, size, input value) (bitwidth, POSSIBLE_SIZE[-1], 0), (bitwidth, 1, 1), (bitwidth, bitwidth, 1), (bitwidth, 2 ** (bitwidth - 2), 1), (bitwidth, 2 ** (bitwidth - 1), 1), (bitwidth, 2 ** bitwidth - 1, 1), (bitwidth, 2 ** bitwidth, 1), # force carry (bitwidth, 2 ** (bitwidth+1), 1), # force overflow and carry 0 ? 
}# <-- a set to deduplicate similar cases for bitwidth in POSSIBLE_BITWIDTH )) CONTROLLED_CODE_PARAMS = [ case for case in CONTROLLED_CODE_PARAMS if case[1] >= 1 ] TEST_CONTROLLED_REPLICATE = 100 WILD_CODE_PARAMS = list(sorted(chain.from_iterable( { #(bitwidth, size, input value) (bitwidth, 2 ** bitwidth + 8, 1), (bitwidth, 2 ** bitwidth + 9, 1), (bitwidth, 2 ** bitwidth + 16, 1), (bitwidth, 2 ** bitwidth + 17, 1), (bitwidth, 2 ** (2 * bitwidth), 1), (bitwidth, 2 ** (2 * bitwidth) + 1, 1), }# <-- a set to deduplicate similar cases for bitwidth in POSSIBLE_BITWIDTH ))) TEST_WILD_RETRY = 3 def basic_multisum_identity(bitwidth, size): def components(name, size, ty=''): ty_annot = ' : ' + ty if ty else '' return ', '.join(f'%{name}{i}{ty_annot}' for i in range(size)) def tensor(size, ty): return f'tensor<{size}x{ty}>' v_ty = f"!FHE.eint<{bitwidth}>" tv_ty = tensor(size, v_ty) w_ty = f"i{bitwidth+1}" w_modulo = 2 ** bitwidth # to match v bitwidth tw_ty = tensor(size, w_ty) lut_size = 2**bitwidth lut_ty = 'i64' tlut_ty = tensor(lut_size, lut_ty) return ( f""" func.func @main({components('v', size, v_ty)}) -> {v_ty} {{ %v = tensor.from_elements {components('v', size)} : {tv_ty} // Declare {size} %wX components { ''.join(f''' %w{i} = arith.constant 1: {w_ty}''' for i in range(size) )} %w = tensor.from_elements {components('w', size)} : {tw_ty} // Declare {lut_size} %lutX components { ''.join(f''' %lut{i} = arith.constant {i}: i64''' for i in range(lut_size) )} %lut = tensor.from_elements {components('lut', lut_size)} : {tlut_ty} %dot_product = "FHELinalg.dot_eint_int"(%v, %w) : ({tv_ty}, {tw_ty}) -> {v_ty} %pbs_result = "FHE.apply_lookup_table"(%dot_product, %lut): ({v_ty}, {tlut_ty}) -> {v_ty} return %pbs_result: {v_ty} }} """ ) executor = futures.ThreadPoolExecutor() def basic_setup(bitwidth, size, const, retry=10): code = basic_multisum_identity(bitwidth, size) args = (const,) * size expected = METHOD_NAME(bitwidth, args) with 
tmp_file(f'basic_{bitwidth:03}_{size:03}_{const}.mlir', code) as path: modulo = 2 ** bitwidth # Read various value from compiler log_manp_max = read_mlir.log_manp_max(path) params = read_mlir.v0_param(path) conditions_details = [] def msg(m, append_here=None, space=' '): print(m, end=space, flush=True) # test human output if append_here is not None: append_here.append(m) if (LOG2_MANP_MAX < log_manp_max): msg('HIGH-MANP', conditions_details) if 2 ** bitwidth <= expected: msg(f'OVERFLOW', conditions_details) cmd = (CONCRETECOMPILER, path) + JIT_INVOKE_MAIN + jit_args(*args) compilers_calls = [executor.submit(run, *cmd) for _ in range(retry)] success = 0 overflow = 0 replications = [] for replication in futures.as_completed(compilers_calls): result = int(replication.result().splitlines()[-1]) correct_in_modulo = expected % modulo == result % modulo details = [] replications.append(Replication(correct_in_modulo, details)) if not (0 <= result < modulo): msg(f'OVERFLOW {result}', details) overflow += 1 if correct_in_modulo: msg('O', space='') success += 1 else: msg('X', space='') diff = f'Expected :{expected % modulo} vs. {result % modulo} (no modulo {expected} vs. 
{result}' details.append(diff) print(' ', end='') add_to(TRACE, Experiment( cmd = ' '.join(cmd), conditions=ExperimentConditions( bitwidth=bitwidth, size=size, args=args, log_manp_max=log_manp_max, overflow=2 ** bitwidth <= expected, details=conditions_details,), replications=replications, code=code, success_rate=100.0 * success/retry, overflow_rate=100.0 * overflow/retry, )) assert success == len(replications) def METHOD_NAME(bitwidth, args): return sum( arg for arg in args ) @contextlib.contextmanager def tmp_file(name, content, delete=False): path = os.path.join(gettempdir(), 'stresstests', name) os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, 'w') as f: f.write(content) yield f.name if delete: os.remove() def add_to(DIR, expe: Experiment): full_test_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0] test_name = full_test_name.rsplit('[', 1)[0] DIR = os.path.join(DIR, test_name) os.makedirs(DIR, exist_ok=True) conditions = expe.conditions name = f'{conditions.bitwidth:03}bits_x_{conditions.size:03}_{conditions.args[0]}' with open(os.path.join(DIR, name), 'w') as f: json.dump(expe, f, indent=2, cls=Encoder) @pytest.mark.parametrize("bitwidth, size, const", CONTROLLED_CODE_PARAMS) def test_controlled(bitwidth, size, const): return basic_setup(bitwidth, size, const, TEST_CONTROLLED_REPLICATE) @pytest.mark.parametrize("bitwidth, size, const", WILD_CODE_PARAMS) def test_wild(bitwidth, size, const): return basic_setup(bitwidth, size, const, TEST_WILD_RETRY)
5,609
to dict
# coding: utf-8 """ Kubeflow Pipelines API This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition. Contact: kubeflow-pipelines@google.com Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from kfp_server_api.configuration import Configuration class ApiRunDetail(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'run': 'ApiRun', 'pipeline_runtime': 'ApiPipelineRuntime' } attribute_map = { 'run': 'run', 'pipeline_runtime': 'pipeline_runtime' } def __init__(self, run=None, pipeline_runtime=None, local_vars_configuration=None): # noqa: E501 """ApiRunDetail - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._run = None self._pipeline_runtime = None self.discriminator = None if run is not None: self.run = run if pipeline_runtime is not None: self.pipeline_runtime = pipeline_runtime @property def run(self): """Gets the run of this ApiRunDetail. # noqa: E501 :return: The run of this ApiRunDetail. # noqa: E501 :rtype: ApiRun """ return self._run @run.setter def run(self, run): """Sets the run of this ApiRunDetail. :param run: The run of this ApiRunDetail. # noqa: E501 :type run: ApiRun """ self._run = run @property def pipeline_runtime(self): """Gets the pipeline_runtime of this ApiRunDetail. # noqa: E501 :return: The pipeline_runtime of this ApiRunDetail. 
# noqa: E501 :rtype: ApiPipelineRuntime """ return self._pipeline_runtime @pipeline_runtime.setter def pipeline_runtime(self, pipeline_runtime): """Sets the pipeline_runtime of this ApiRunDetail. :param pipeline_runtime: The pipeline_runtime of this ApiRunDetail. # noqa: E501 :type pipeline_runtime: ApiPipelineRuntime """ self._pipeline_runtime = pipeline_runtime def METHOD_NAME(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.METHOD_NAME() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.METHOD_NAME() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].METHOD_NAME()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.METHOD_NAME()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ApiRunDetail): return False return self.METHOD_NAME() == other.METHOD_NAME() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ApiRunDetail): return True return self.METHOD_NAME() != other.METHOD_NAME()
5,610
test nc pseudos
from __future__ import annotations import collections import os.path import pytest from pytest import approx from pymatgen.io.abinit.pseudos import Pseudo, PseudoTable from pymatgen.util.testing import TEST_FILES_DIR, PymatgenTest _test_dir = f"{TEST_FILES_DIR}/abinit" def ref_file(filename): return os.path.join(_test_dir, filename) def ref_files(*filenames): return list(map(ref_file, filenames)) class PseudoTestCase(PymatgenTest): def setUp(self): nc_pseudo_fnames = collections.defaultdict(list) nc_pseudo_fnames["Si"] = ref_files("14si.pspnc", "14si.4.hgh", "14-Si.LDA.fhi") self.nc_pseudos = collections.defaultdict(list) for symbol, fnames in nc_pseudo_fnames.items(): for fname in fnames: root, ext = os.path.splitext(fname) pseudo = Pseudo.from_file(fname) self.nc_pseudos[symbol].append(pseudo) # Save the pseudo as instance attribute whose name # is constructed with the rule: symbol_ppformat attr_name = symbol + "_" + ext[1:] if hasattr(self, attr_name): raise RuntimeError(f"self has already the attribute {attr_name}") setattr(self, attr_name, pseudo) def METHOD_NAME(self): """Test norm-conserving pseudopotentials.""" for symbol, pseudos in self.nc_pseudos.items(): for pseudo in pseudos: assert repr(pseudo) assert str(pseudo) assert pseudo.isnc assert not pseudo.ispaw assert pseudo.Z == 14 assert pseudo.symbol == symbol assert pseudo.Z_val == 4 assert pseudo.nlcc_radius >= 0.0 # Test pickle self.serialize_with_pickle(pseudo) # Test MSONable self.assert_msonable(pseudo) # HGH pseudos pseudo = self.Si_hgh assert not pseudo.has_nlcc assert pseudo.l_max == 1 assert pseudo.l_local == 0 assert not pseudo.supports_soc assert self.Si_hgh.md5 is not None assert self.Si_hgh == self.Si_hgh # TM pseudos pseudo = self.Si_pspnc assert pseudo.has_nlcc assert pseudo.l_max == 2 assert pseudo.l_local == 2 assert not pseudo.supports_soc assert self.Si_hgh != self.Si_pspnc # FHI pseudos pseudo = self.Si_fhi assert not pseudo.has_nlcc assert pseudo.l_max == 3 assert pseudo.l_local == 
2 assert not pseudo.supports_soc # Test PseudoTable table = PseudoTable(self.nc_pseudos["Si"]) assert repr(table) assert str(table) assert table.allnc assert not table.allpaw assert table.is_complete assert len(table) == 3 assert len(table[14]) == 3 assert len(table.select_symbols("Si")) == 3 assert table.zlist == [14] # Test pickle self.serialize_with_pickle(table, test_eq=False) def test_pawxml_pseudos(self): """Test O.GGA_PBE-JTH-paw.xml.""" oxygen = Pseudo.from_file(ref_file("O.GGA_PBE-JTH-paw.xml")) assert repr(oxygen) assert str(oxygen) assert isinstance(oxygen.as_dict(), dict) assert oxygen.ispaw assert oxygen.symbol == "O" assert (oxygen.Z, oxygen.core, oxygen.valence) == (8, 2, 6), oxygen.Z_val == 6 assert oxygen.xc.type == "GGA" assert oxygen.xc.name == "PBE" assert oxygen.supports_soc assert oxygen.md5 is not None assert oxygen.paw_radius == approx(1.4146523028) # Test pickle new_objs = self.serialize_with_pickle(oxygen) # Test MSONable self.assert_msonable(oxygen) for obj in new_objs: assert obj.ispaw assert obj.symbol == "O" assert (obj.Z, obj.core, obj.valence) == (8, 2, 6), obj.Z_val == 6 assert obj.paw_radius == approx(1.4146523028) def test_oncvpsp_pseudo_sr(self): """Test the ONCVPSP Ge pseudo (scalar relativistic version).""" ger = Pseudo.from_file(ref_file("ge.oncvpsp")) assert repr(ger) assert str(ger) assert isinstance(ger.as_dict(), dict) ger.as_tmpfile() assert ger.symbol == "Ge" assert ger.Z == 32.0 assert ger.Z_val == 4.0 assert ger.isnc assert not ger.ispaw assert ger.l_max == 2 assert ger.l_local == 4 assert ger.rcore is None assert not ger.supports_soc # Data persistence self.serialize_with_pickle(ger) self.assert_msonable(ger) def test_oncvpsp_pseudo_fr(self): """Test the ONCVPSP Pb pseudo (relativistic version with SO).""" pb = Pseudo.from_file(ref_file("Pb-d-3_r.psp8")) repr(pb) str(pb) # Data persistence self.serialize_with_pickle(pb) self.assert_msonable(pb) assert pb.symbol == "Pb" assert pb.Z == 82.0 assert pb.Z_val == 14.0 
assert pb.isnc assert not pb.ispaw assert pb.l_max == 2 assert pb.l_local == 4 assert pb.supports_soc class TestPseudoTable(PymatgenTest): def test_methods(self): """Test PseudoTable methods.""" table = PseudoTable(ref_files("14si.pspnc", "14si.4.hgh", "14-Si.LDA.fhi")) assert str(table) assert len(table) == 3 for pseudo in table: assert pseudo.isnc assert table.allnc assert not table.allpaw assert table.zlist == [14] # Data persistence self.serialize_with_pickle(table, test_eq=False) d = table.as_dict() PseudoTable.from_dict(d) self.assert_msonable(table) selected = table.select_symbols("Si") assert len(selected) == len(table) assert selected.__class__ is table.__class__ with pytest.raises(ValueError, match=r"Found multiple occurrences of symbol\(s\) Si"): table.pseudos_with_symbols("Si")
5,611
test type configs
# -*- coding: utf-8 -*- import unittest from lxml import etree import sys sys.path.append('.') from androguard.core import apk, axml from operator import itemgetter TEST_APP_NAME = "TestsAndroguardApplication" TEST_ICONS = { 120: "res/drawable-ldpi/icon.png", 160: "res/drawable-mdpi/icon.png", 240: "res/drawable-hdpi/icon.png", 65536: "res/drawable-hdpi/icon.png" } TEST_CONFIGS = { "layout": [axml.ARSCResTableConfig.default_config()], "string": [axml.ARSCResTableConfig.default_config()], "drawable": [ axml.ARSCResTableConfig(sdkVersion=4, density=120), axml.ARSCResTableConfig(sdkVersion=4, density=160), axml.ARSCResTableConfig(sdkVersion=4, density=240) ] } class ARSCTest(unittest.TestCase): @classmethod def setUpClass(cls): with open("examples/android/TestsAndroguard/bin/TestActivity.apk", "rb") as fd: cls.apk = apk.APK(fd.read(), True) def testARSC(self): arsc = self.apk.get_android_resources() self.assertTrue(arsc) def testAppName(self): app_name = self.apk.get_app_name() self.assertEqual(app_name, TEST_APP_NAME, "Couldn't deduce application/activity label") def testAppIcon(self): for wanted_density, correct_path in TEST_ICONS.items(): app_icon_path = self.apk.get_app_icon(wanted_density) self.assertEqual(app_icon_path, correct_path, "Incorrect icon path for requested density") def testStrings(self): arsc = self.apk.get_android_resources() p = arsc.get_packages_names()[0] l = "\x00\x00" e = etree.fromstring(arsc.get_string_resources(p, l)) self.assertEqual(e.find("string[@name='hello']").text, 'Hello World, TestActivity! 
kikoololmodif') self.assertEqual(e.find("string[@name='app_name']").text, 'TestsAndroguardApplication') def testResourceNames(self): """ Test if the resource name translation works """ arsc = self.apk.get_android_resources() self.assertEqual(arsc.get_resource_xml_name(0x7F040001), "@tests.androguard:string/app_name") self.assertEqual(arsc.get_resource_xml_name(0x7F020000), "@tests.androguard:drawable/icon") self.assertEqual(arsc.get_resource_xml_name(0x7F040001, 'tests.androguard'), "@string/app_name") self.assertEqual(arsc.get_resource_xml_name(0x7F020000, 'tests.androguard'), "@drawable/icon") # Also test non existing resources self.assertIsNone(arsc.get_resource_xml_name(0xFFFFFFFF)) self.assertEqual(arsc.get_id('sdf', 0x7F040001), (None, None, None)) self.assertEqual(arsc.get_id('tests.androguard', 0xFFFFFFFF), (None, None, None)) def testDifferentStringLocales(self): """ Test if the resolving of different string locales works """ a = APK("examples/tests/a2dp.Vol_137.apk") arsc = a.get_android_resources() p = arsc.get_packages_names()[0] self.assertEqual(sorted(["\x00\x00", "da", "de", "el", "fr", "ja", "ru"]), sorted(arsc.get_locales(p))) item = "SMSDelayText" strings = {"\x00\x00": "Delay for reading text message", "da": "Forsinkelse for læsning af tekst besked", "de": "Verzögerung vor dem Lesen einer SMS", "el": "Χρονοκαθυστέρηση ανάγνωσης μηνυμάτων SMS", "fr": "Délai pour lire un SMS", "ja": "テキストメッセージ読み上げの遅延", "ru": "Задержка зачитывания SMS", } for k, v in strings.items(): e = etree.fromstring(arsc.get_string_resources(p, k)) self.assertEqual(e.find("string[@name='{}']".format(item)).text, v) def METHOD_NAME(self): arsc = self.apk.get_android_resources() configs = arsc.get_type_configs(None) for res_type, test_configs in list(TEST_CONFIGS.items()): config_set = set(test_configs) self.assertIn(res_type, configs, "resource type %s was not found" % res_type) for config in configs[res_type]: print(config.get_config_name_friendly()) self.assertIn(config, 
config_set, "config %r was not expected" % config) config_set.remove(config) self.assertEqual(len(config_set), 0, "configs were not found: %s" % config_set) unexpected_types = set(TEST_CONFIGS.keys()) - set(configs.keys()) self.assertEqual(len(unexpected_types), 0, "received unexpected resource types: %s" % unexpected_types) def testFallback(self): a = apk.APK("examples/tests/com.teleca.jamendo_35.apk") # Should use the fallback self.assertEqual(a.get_app_name(), "Jamendo") res_parser = a.get_android_resources() res_id = int(a.get_attribute_value('application', 'label')[1:], 16) # Default Mode, no config self.assertEqual(len(res_parser.get_res_configs(res_id)), 2) # With default config, but fallback self.assertEqual(len(res_parser.get_res_configs(res_id, axml.ARSCResTableConfig.default_config())), 1) # With default config but no fallback self.assertEqual(len(res_parser.get_res_configs(res_id, axml.ARSCResTableConfig.default_config(), fallback=False)), 0) # Also test on resolver: self.assertListEqual(list(map(itemgetter(1), res_parser.get_resolved_res_configs(res_id))), ["Jamendo", "Jamendo"]) self.assertListEqual(list(map(itemgetter(1), res_parser.get_resolved_res_configs(res_id, axml.ARSCResTableConfig.default_config()))), ["Jamendo"]) def testIDParsing(self): parser = axml.ARSCParser.parse_id self.assertEqual(parser('@DEADBEEF'), (0xDEADBEEF, None)) self.assertEqual(parser('@android:DEADBEEF'), (0xDEADBEEF, 'android')) self.assertEqual(parser('@foobar:01020304'), (0x01020304, 'foobar')) with self.assertRaises(ValueError): parser('@whatisthis') with self.assertRaises(ValueError): parser('#almost') with self.assertRaises(ValueError): parser('@android:NONOTHEX') with self.assertRaises(ValueError): parser('@android:00') if __name__ == '__main__': unittest.main()
5,612
test getregion
import unittest2 as unittest

# import unittest
from libagent import error, playground, grassland
import grass.script as grass
from grass.script import array as garray


class TestGrassland(unittest.TestCase):
    """Unit tests for grassland.Grassland (a GRASS-GIS backed playground).

    NOTE(review): the raster/vector round-trip tests are disabled by default;
    they only run when self.rastlayername / self.vectlayername are set in
    setUp() to the names of scratch GRASS maps.
    """

    def setUp(self):
        # TODO check if there is a nicer way to do this..
        # Scratch map names are None by default, which skips the map tests.
        self.rastlayername = None  # "r_agent_rast_testmap@"+grass.gisenv()['MAPSET']
        self.vectlayername = None  # "r_agent_vect_testmap@"+grass.gisenv()['MAPSET']
        if self.rastlayername:
            # Refuse to run if the scratch raster map already exists — the
            # tests would otherwise clobber a real map.
            for m in grass.list_strings("rast"):
                if self.rastlayername == m:
                    print(
                        "We need a raster map to play with in this test,"
                        + " but it seems to exist already: '"
                        + self.rastlayername
                        + "'"
                    )
                    self.assertTrue(False)
        if self.vectlayername:
            # Same guard for the scratch vector map.
            for m in grass.list_strings("vect"):
                if self.vectlayername == m:
                    print(
                        "We need a vector map to play with in this test,"
                        + " but it seems to exist already: '"
                        + self.vectlayername
                        + "'"
                    )
                    self.assertTrue(False)
        self.pg = grassland.Grassland()

    def METHOD_NAME(self):
        # getregion() must report the current GRASS region.
        self.assertIsNotNone(self.pg.getregion())
        self.assertEqual(self.pg.getregion(), grass.region())

    def test_setregion(self):
        # TODO should not be required here.. maybe "resetregion()"?
        # is set in constructor
        pass

    def test_getbound(self):
        # Region bounds must be internally consistent: north > south,
        # east > west, and the extents divide evenly into rows/cols.
        n = self.pg.region["n"]
        s = self.pg.region["s"]
        w = self.pg.region["w"]
        e = self.pg.region["e"]
        ns = self.pg.region["nsres"]
        ew = self.pg.region["ewres"]
        r = self.pg.region["rows"]
        c = self.pg.region["cols"]
        self.assertIsNotNone(n)
        self.assertIsNotNone(s)
        self.assertIsNotNone(w)
        self.assertIsNotNone(e)
        self.assertTrue(n > s)
        self.assertTrue(e > w)
        self.assertEqual((n - s) / ns, r)
        self.assertEqual((e - w) / ew, c)

    def test_setlayer(self):
        # gets tested in createlayer and super()/Playground
        pass

    def test_setgrasslayer(self):
        # only do this test, if self.rastlayername is set
        if self.rastlayername:
            layer = garray.array()
            # set the layer
            ## the first time it is missing
            self.assertRaises(
                error.DataError,
                self.pg.setgrasslayer,
                *[self.rastlayername, self.rastlayername],
            )
            ## so we need to write it first
            pass
            # TODO
            ## the second time it should be added correctly
            ## now test whether it fails the third time
            # self.assertRaises(error.Error, self.pg.setgrasslayer,
            #                   *[self.rastlayername, self.rastlayername])
            # if not ( self.pg.layers.has_key(self.rastlayername) and \
            #         self.pg.grassmapnames.has_key(self.rastlayername) ):
            #     print "GRASS map layer was set but seems missing"
            #     self.assertTrue(False)
            # set it once more, this time forcing it
            # self.pg.setgrasslayer(self.rastlayername, self.rastlayername, True)

    def test_createlayer(self):
        # A freshly created layer must be registered under both bookkeeping
        # dicts and be sized to the current region.
        self.pg.createlayer("foo", "foo")
        self.assertTrue("foo" in self.pg.layers)
        self.assertTrue("foo" in self.pg.grassmapnames)
        self.assertEqual(len(self.pg.layers["foo"]), self.pg.region["rows"])
        self.assertEqual(len(self.pg.layers["foo"][0]), self.pg.region["cols"])

    def test_getlayer(self):
        # gets tested in createlayer and super()/Playground
        pass

    def test_removelayer(self):
        # removelayer() must drop both the layer data and its GRASS map name.
        self.pg.layers["foo"] = [0]
        self.pg.grassmapnames["foo"] = "foo"
        self.assertTrue("foo" in self.pg.layers)
        self.pg.removelayer("foo")
        self.assertFalse("foo" in self.pg.layers)
        self.assertFalse("foo" in self.pg.grassmapnames)

    def test_writelayer(self):
        if self.rastlayername:
            # create an empty test map
            layer = garray.array()
            self.pg.createlayer(self.rastlayername, self.rastlayername)
            # write it once
            self.pg.writelayer(self.rastlayername)
            # now remove it from the internal grasslayer list
            self.pg.grassmapnames.pop(self.rastlayername)
            self.assertRaises(error.DataError, self.pg.writelayer, self.rastlayername)
            # try again, this time being explicit, but still fail
            self.assertRaises(
                error.DataError,
                self.pg.writelayer,
                *[self.rastlayername, self.rastlayername],
            )
            # force write it again..
            self.pg.writelayer(self.rastlayername, self.rastlayername, True)

    def test_parsevectorlayer(self):
        if self.vectlayername:
            # TODO find a way to write vector files..
            pass

    def test_decaycellvalues(self):
        # Expected values imply a decay of roughly 0.794 per call for
        # half-life 3 (100 -> 79 -> 63 -> 50), with the last call clamped
        # at the given floor value of 50.
        l = "bar"
        self.pg.createlayer(l)
        self.pg.layers[l][0][0] = 100
        self.pg.decaycellvalues(l, 3)
        self.assertEqual(int(round(self.pg.layers[l][0][0])), 79)
        self.pg.decaycellvalues(l, 3)
        self.assertEqual(int(round(self.pg.layers[l][0][0])), 63)
        self.pg.decaycellvalues(l, 3)
        self.assertEqual(int(round(self.pg.layers[l][0][0])), 50)
        self.pg.decaycellvalues(l, 3, 50)
        self.assertEqual(int(round(self.pg.layers[l][0][0])), 50)

    def tearDown(self):
        # Best-effort cleanup of the scratch maps, if they were created.
        if self.rastlayername:
            grass.try_remove(
                grass.find_file(name=self.rastlayername, element="cell")["file"]
            )
        if self.vectlayername:
            grass.try_remove(
                grass.find_file(name=self.vectlayername, element="vector")["file"]
            )
5,613
check params
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements attribute inference attacks.
"""
from __future__ import absolute_import, division, print_function, unicode_literals

import logging
from typing import Optional

import numpy as np

from art.estimators.classification.scikitlearn import ScikitlearnDecisionTreeClassifier
from art.attacks.attack import AttributeInferenceAttack

logger = logging.getLogger(__name__)


class AttributeInferenceWhiteBoxDecisionTree(AttributeInferenceAttack):
    """
    A variation of the method proposed by of Fredrikson et al. in:
    https://dl.acm.org/doi/10.1145/2810103.2813677

    Assumes the availability of the attacked model's predictions for the samples under attack, in addition to access
    to the model itself and the rest of the feature values. If this is not available, the true class label of the
    samples may be used as a proxy. Also assumes that the attacked feature is discrete or categorical, with limited
    number of possible values. For example: a boolean feature.

    | Paper link: https://dl.acm.org/doi/10.1145/2810103.2813677
    """

    _estimator_requirements = (ScikitlearnDecisionTreeClassifier,)

    def __init__(self, classifier: ScikitlearnDecisionTreeClassifier, attack_feature: int = 0):
        """
        Create an AttributeInferenceWhiteBox attack instance.

        :param classifier: Target classifier.
        :param attack_feature: The index of the feature to be attacked.
        """
        super().__init__(estimator=classifier, attack_feature=attack_feature)
        # Annotation only; the value itself is stored by the base class.
        self.attack_feature: int
        self.METHOD_NAME()

    def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
        """
        Infer the attacked feature.

        If the model's prediction coincides with the real prediction for the sample for a single value, choose it as
        the predicted value. If not, fall back to the Fredrikson method (without phi)

        :param x: Input to attack. Includes all features except the attacked feature.
        :param y: Original model's predictions for x.
        :param values: Possible values for attacked feature.
        :type values: list
        :param priors: Prior distributions of attacked feature values. Same size array as `values`.
        :type priors: list
        :return: The inferred feature values.
        """
        if "priors" not in kwargs:  # pragma: no cover
            raise ValueError("Missing parameter `priors`.")
        if "values" not in kwargs:  # pragma: no cover
            raise ValueError("Missing parameter `values`.")
        priors: Optional[list] = kwargs.get("priors")
        values: Optional[list] = kwargs.get("values")

        # x excludes the attacked feature, hence the "+ 1".
        if self.estimator.input_shape[0] != x.shape[1] + 1:  # pragma: no cover
            raise ValueError("Number of features in x + 1 does not match input_shape of classifier")
        if priors is None or values is None:  # pragma: no cover
            raise ValueError("`priors` and `values` are required as inputs.")
        if len(priors) != len(values):  # pragma: no cover
            raise ValueError("Number of priors does not match number of values")
        if y is not None and y.shape[0] != x.shape[0]:  # pragma: no cover
            raise ValueError("Number of rows in x and y do not match")
        if self.attack_feature >= x.shape[1]:  # pragma: no cover
            raise ValueError("attack_feature must be a valid index to a feature in x")

        n_values = len(values)
        n_samples = x.shape[0]

        # Will contain the model's predictions for each value
        pred_values = []
        # Will contain the probability of each value
        prob_values = []

        for i, value in enumerate(values):
            # prepare data with the given value in the attacked feature
            # (splice the candidate value back in at attack_feature's column)
            v_full = np.full((n_samples, 1), value).astype(x.dtype)
            x_value = np.concatenate((x[:, : self.attack_feature], v_full), axis=1)
            x_value = np.concatenate((x_value, x[:, self.attack_feature :]), axis=1)

            # Obtain the model's prediction for each possible value of the attacked feature
            pred_value = [np.argmax(arr) for arr in self.estimator.predict(x_value)]
            pred_values.append(pred_value)

            # find the relative probability of this value for all samples being attacked
            # (fraction of training samples at the reached leaf, weighted by the prior)
            prob_value = [
                (
                    (self.estimator.get_samples_at_node(self.estimator.get_decision_path([row])[-1]) / n_samples)
                    * priors[i]
                )
                for row in x_value
            ]
            prob_values.append(prob_value)

        # Find the single value that coincides with the real prediction for the sample (if it exists)
        pred_rows = zip(*pred_values)
        predicted_pred = []
        for row_index, row in enumerate(pred_rows):
            if y is not None:
                matches = [1 if row[value_index] == y[row_index] else 0 for value_index in range(n_values)]
                match_values = [
                    values[value_index] if row[value_index] == y[row_index] else 0 for value_index in range(n_values)
                ]
            else:
                matches = [0 for _ in range(n_values)]
                match_values = [0 for _ in range(n_values)]
            # Only commit to a value when exactly one candidate matched.
            predicted_pred.append(sum(match_values) if sum(matches) == 1 else None)

        # Choose the value with highest probability for each sample
        predicted_prob = [np.argmax(list(prob)) for prob in zip(*prob_values)]

        # Fall back to the highest-probability value where no unique match was found.
        return np.array(
            [
                value if value is not None else values[predicted_prob[index]]
                for index, value in enumerate(predicted_pred)
            ]
        )

    def METHOD_NAME(self) -> None:
        # This subclass adds no checks of its own; it delegates entirely to
        # the base class.
        super().METHOD_NAME()
5,614
test invalid subpolicy base types
import pytest
from aerospike import exception as e

import aerospike


class TestInvalidClientConfig(object):
    """Each test feeds aerospike.client() a malformed config and asserts
    that a ParamError with the expected message (or at least the expected
    exception type) is raised before any connection is attempted."""

    def test_no_config(self):
        with pytest.raises(e.ParamError) as err:
            aerospike.client()
        assert "No config argument" in err.value.msg

    def test_config_not_dict(self):
        with pytest.raises(e.ParamError) as err:
            aerospike.client([])
        assert "Config must be a dict" in err.value.msg

    def test_no_host_in_config(self):
        with pytest.raises(e.ParamError) as err:
            aerospike.client({})
        assert "Hosts must be a list" in err.value.msg

    def test_wrong_host_type(self):
        # A tuple of hosts is rejected; it must be a list.
        with pytest.raises(e.ParamError) as err:
            aerospike.client({"hosts": (())})
        assert "Hosts must be a list" in err.value.msg

    def test_empty_host_in_config(self):
        with pytest.raises(e.ParamError) as err:
            aerospike.client({"hosts": []})
        assert "Hosts must not be empty" in err.value.msg

    def test_invalid_host_in_list(self):
        # One valid entry followed by an empty tuple.
        with pytest.raises(e.ParamError) as err:
            aerospike.client({"hosts": [("localhost", 3000), ()]})
        assert "Invalid host" in err.value.msg

    def test_lua_user_path_too_long(self):
        with pytest.raises(e.ParamError) as err:
            aerospike.client({"hosts": [("localhost", 3000)], "lua": {"user_path": "a" * 256}})
        assert "Lua user path too long" in err.value.msg

    def test_non_callable_serializer(self):
        with pytest.raises(e.ParamError) as err:
            aerospike.client({"hosts": [("localhost", 3000)], "serialization": (5, lambda x: 5)})
        assert "Serializer must be callable" in err.value.msg

    def test_non_callable_deserializer(self):
        with pytest.raises(e.ParamError) as err:
            aerospike.client({"hosts": [("localhost", 3000)], "serialization": (lambda x: 5, 5)})
        assert "Deserializer must be callable" in err.value.msg

    def test_negative_threshold_value(self):
        with pytest.raises(e.ParamError) as err:
            aerospike.client({"hosts": [("localhost", 3000)], "compression_threshold": -1})
        assert "Compression value must not be negative" in err.value.msg

    # Fields shared by all transaction policies must be integers; strings
    # are rejected for every policy type.
    @pytest.mark.parametrize(
        "policy", ["read", "write", "operate", "batch", "scan", "query", "apply", "remove"]
    )
    @pytest.mark.parametrize(
        "key, value",
        [("total_timeout", "5"), ("socket_timeout", "5"), ("max_retries", "5"), ("sleep_between_retries", "5")],
    )
    def METHOD_NAME(self, policy, key, value):
        subpolicy = {key: value}
        with pytest.raises(e.ParamError):
            aerospike.client({"hosts": [("localhost", 3000)], "policies": {policy: subpolicy}})

    @pytest.mark.parametrize("key, value", [("deserialize", "nope"), ("key", "send"), ("replica", "maybe?")])
    def test_invalid_read_policy_types(self, key, value):
        subpolicy = {key: value}
        with pytest.raises(e.ParamError):
            aerospike.client({"hosts": [("localhost", 3000)], "policies": {"read": subpolicy}})

    @pytest.mark.parametrize(
        "key, value",
        [
            ("key", "send"),  # should be int
            ("exists", "exists"),  # should be int
            ("gen", "should be a constant integer"),  # should be int
            ("commit_level", "committed"),  # should be int
            ("durable_delete", "durable"),  # should be bool
        ],
    )
    def test_invalid_write_policy_types(self, key, value):
        subpolicy = {key: value}
        with pytest.raises(e.ParamError):
            aerospike.client({"hosts": [("localhost", 3000)], "policies": {"write": subpolicy}})

    @pytest.mark.parametrize(
        "key, value",
        [
            ("key", "send"),  # should be int
            ("gen", "should be a constant integer"),  # should be int
            ("replica", "maybe?"),  # should be int
            ("commit_level", "committed"),  # should be int
            ("durable_delete", "durable"),  # should be bool
        ],
    )
    def test_invalid_operat_policy_types(self, key, value):
        subpolicy = {key: value}
        with pytest.raises(e.ParamError):
            aerospike.client({"hosts": [("localhost", 3000)], "policies": {"operate": subpolicy}})

    @pytest.mark.parametrize(
        "key, value",
        [
            ("concurrent", "concurrent"),  # should be bool
            ("allow_inline", "False"),  # should be bool
            ("deserialize", "False"),  # should be bool
        ],
    )
    def test_invalid_batch_policy_types(self, key, value):
        subpolicy = {key: value}
        with pytest.raises(e.ParamError):
            aerospike.client({"hosts": [("localhost", 3000)], "policies": {"batch": subpolicy}})

    @pytest.mark.parametrize("key, value", [("durable_delete", "durable")])  # should be bool
    def test_invalid_scan_policy_types(self, key, value):
        subpolicy = {key: value}
        with pytest.raises(e.ParamError):
            aerospike.client({"hosts": [("localhost", 3000)], "policies": {"scan": subpolicy}})

    # Keep this parametrized in case query gets additional policies
    @pytest.mark.parametrize(
        "key, value",
        [
            ("deserialize", "False"),  # should be a bool
        ],
    )
    def test_invalid_query_policy_types(self, key, value):
        subpolicy = {key: value}
        with pytest.raises(e.ParamError):
            aerospike.client({"hosts": [("localhost", 3000)], "policies": {"query": subpolicy}})

    @pytest.mark.parametrize(
        "key, value",
        [
            # ("gen", "should be a constant integer"),  # gen removed from apply policies by C client 5.0
            ("replica", "maybe?"),  # should be int
            ("commit_level", "committed"),  # should be int
            ("key", "send"),  # should be an int eg. aerospike.POLICY_KEY_SEND
            ("durable_delete", "durable"),  # should be bool
        ],
    )
    def test_invalid_apply_policy_types(self, key, value):
        subpolicy = {key: value}
        with pytest.raises(e.ParamError):
            aerospike.client({"hosts": [("localhost", 3000)], "policies": {"apply": subpolicy}})

    @pytest.mark.parametrize(
        "key, value",
        [
            ("key", "send"),  # should be an int eg. aerospike.POLICY_KEY_SEND
            ("durable_delete", "durable"),  # should be bool
            ("gen", "should be a constant integer"),  # should be int
            ("replica", "maybe?"),  # should be int
            ("commit_level", "committed"),  # should be int
        ],
    )
    def test_invalid_remove_policy_types(self, key, value):
        subpolicy = {key: value}
        with pytest.raises(e.ParamError):
            aerospike.client({"hosts": [("localhost", 3000)], "policies": {"remove": subpolicy}})
5,615
run constant climate with bias
import numpy as np

from oggm.core.flowline import (
    flowline_model_run,
    FileModel,
)
from oggm.core.massbalance import (
    ConstantMassBalance,
    MassBalanceModel,
)
from oggm import entity_task

# Module logger
import logging

log = logging.getLogger(__name__)


class BiasedConstantMassBalance(MassBalanceModel):
    """Time-dependant Temp and PRCP delta ConstantMassBalance model"""

    def __init__(
        self,
        gdir,
        temp_bias_ts=None,
        prcp_fac_ts=None,
        bias=0,
        y0=None,
        halfsize=15,
        filename="climate_historical",
        input_filesuffix="",
        **kwargs
    ):
        """Initialize

        Parameters
        ----------
        gdir : GlacierDirectory
            the glacier directory
        temp_bias_ts : pandas DataFrame
            the temperature bias timeseries (in °C) (index: time as years)
        prcp_fac_ts : pandas DataFrame
            the precipitaion bias timeseries (in % change, positive or
            negative) (index: time as years)
        bias : float, optional
            set to the alternative value of the annual bias [mm we yr-1]
            you want to use (the default is to use the calibrated value)
        y0 : int
            the year at the center of the period of interest. Has to be set!
        halfsize : int, optional
            the half-size of the time window (window size = 2 * halfsize + 1)
        filename : str, optional
            set to a different BASENAME if you want to use alternative climate
            data.
        input_filesuffix : str
            the file suffix of the input climate file
        """
        super(BiasedConstantMassBalance, self).__init__()

        # The wrapped model provides the baseline constant climate.
        self.mbmod = ConstantMassBalance(
            gdir,
            bias=bias,
            y0=y0,
            halfsize=halfsize,
            filename=filename,
            input_filesuffix=input_filesuffix,
            **kwargs
        )

        self.valid_bounds = self.mbmod.valid_bounds
        self.hemisphere = gdir.hemisphere
        self.is_year_valid = self.mbmod.is_year_valid

        # Set ys and ye
        # NOTE(review): temp_bias_ts is accessed unconditionally here, so
        # despite its None default it is effectively a required argument.
        self.ys = int(temp_bias_ts.index[0])
        self.ye = int(temp_bias_ts.index[-1])

        if prcp_fac_ts is None:
            # No precipitation correction: an all-zero series on the same index.
            prcp_fac_ts = temp_bias_ts * 0

        # Store the absolute (baseline + delta) timeseries.
        self.prcp_fac_ts = self.mbmod.prcp_fac + prcp_fac_ts
        self.temp_bias_ts = self.mbmod.temp_bias + temp_bias_ts

    @property
    def temp_bias(self):
        """Temperature bias to add to the original series."""
        return self.mbmod.temp_bias

    @temp_bias.setter
    def temp_bias(self, value):
        """Temperature bias to add to the original series."""
        self.mbmod.temp_bias = value

    @property
    def prcp_fac(self):
        """Precipitation factor to apply to the original series."""
        return self.mbmod.prcp_fac

    @prcp_fac.setter
    def prcp_fac(self, value):
        """Precipitation factor to apply to the original series."""
        self.mbmod.prcp_fac = value

    @property
    def bias(self):
        """Residual bias to apply to the original series."""
        return self.mbmod.bias

    @bias.setter
    def bias(self, value):
        """Residual bias to apply to the original series."""
        self.mbmod.bias = value

    def _check_bias(self, year):
        # Sync the wrapped model's bias/factor with the timeseries value for
        # this year, but only when it actually changed (setters may be costly).
        t = np.asarray(self.temp_bias_ts.loc[int(year)])
        if np.any(t != self.temp_bias):
            self.temp_bias = t
        p = np.asarray(self.prcp_fac_ts.loc[int(year)])
        if np.any(p != self.prcp_fac):
            self.prcp_fac = p

    def get_monthly_mb(self, heights, year=None, **kwargs):
        # Apply this year's corrections before delegating.
        self._check_bias(year)
        return self.mbmod.get_monthly_mb(heights, year=year, **kwargs)

    def get_annual_mb(self, heights, year=None, **kwargs):
        # Apply this year's corrections before delegating.
        self._check_bias(year)
        return self.mbmod.get_annual_mb(heights, year=year, **kwargs)


@entity_task(log)
def METHOD_NAME(
    gdir,
    temp_bias_ts=None,
    prcp_fac_ts=None,
    ys=None,
    ye=None,
    y0=2014,
    halfsize=5,
    climate_filename="climate_historical",
    climate_input_filesuffix="",
    output_filesuffix="",
    init_model_fls=None,
    init_model_filesuffix=None,
    init_model_yr=None,
    bias=0,
    **kwargs
):
    """Runs a glacier with temperature and precipitation correction timeseries.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    temp_bias_ts : pandas DataFrame
        the temperature bias timeseries (in °C) (index: time as years)
    prcp_fac_ts : pandas DataFrame
        the precipitaion bias timeseries (in % change, positive or negative)
        (index: time as years)
    ys : int
        start year of the model run (default: taken from the correction
        timeseries)
    ye : int
        end year of the model run (default: taken from the correction
        timeseries)
    y0 : int
        central year of the constant climate period
    halfsize : int
        half-size of the constant climate period
    climate_filename : str
        name of the climate file, e.g. 'climate_historical' (default) or
        'gcm_data'
    climate_input_filesuffix: str
        filesuffix for the input climate file
    output_filesuffix : str
        for the output file
    init_model_fls : []
        list of flowlines to use to initialise the model (the default is the
        present_time_glacier file from the glacier directory)
    init_model_filesuffix : str
        if you want to start from a previous model run state. Can be
        combined with `init_model_yr`
    init_model_yr : int
        the year of the initial run you want to start from. The default
        is to take the last year available
    bias : float
        bias of the mb model. Default is to use the calibrated one, which
        is zero usually anyways.
    kwargs : dict
        kwargs to pass to the FluxBasedModel instance
    """
    if init_model_filesuffix is not None:
        # Restart from a stored geometry, advanced to the requested year.
        fp = gdir.get_filepath("model_geometry", filesuffix=init_model_filesuffix)
        fmod = FileModel(fp)
        if init_model_yr is None:
            init_model_yr = fmod.last_yr
        # Avoid issues here
        if init_model_yr > fmod.y0:
            fmod.run_until(init_model_yr)
        else:
            fmod.run_until(fmod.y0)
        init_model_fls = fmod.fls

    # Final crop
    mb = BiasedConstantMassBalance(
        gdir,
        temp_bias_ts=temp_bias_ts,
        prcp_fac_ts=prcp_fac_ts,
        y0=y0,
        bias=bias,
        halfsize=halfsize,
        filename=climate_filename,
        input_filesuffix=climate_input_filesuffix,
    )

    # Decide from climate
    if ye is None:
        ye = mb.ye
    if ys is None:
        ys = mb.ys

    return flowline_model_run(
        gdir,
        output_filesuffix=output_filesuffix,
        mb_model=mb,
        ys=ys,
        ye=ye,
        init_model_fls=init_model_fls,
        **kwargs
    )
5,616
handle gesture complete
'''
Multistroke Recognition Database Demonstration
==============================================

This application records gestures and attempts to match them. You should
see a black drawing surface with some buttons across the bottom. As you
make a gesture on the drawing surface, the gesture will be added to the
history and a match will be attempted. If you go to the history tab, name
the gesture, and add it to the database, then similar gestures in the future
will be recognized. You can load and save databases of gestures in .kg
files.

This demonstration code spans many files, with this being the primary file.
The information pop-up ('No match') comes from the file helpers.py.
The history pane is managed in the file historymanager.py and described
in the file historymanager.kv. The database pane and storage is managed in
the file gesturedatabase.py and the described in the file
gesturedatabase.kv. The general logic of the sliders and buttons are
in the file settings.py and described in settings.kv. but the actual
settings pane is described in the file multistroke.kv and managed from
this file.
'''

from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.gesturesurface import GestureSurface
from kivy.uix.screenmanager import ScreenManager, Screen, SlideTransition
from kivy.uix.label import Label
from kivy.multistroke import Recognizer

# Local libraries
from historymanager import GestureHistoryManager
from gesturedatabase import GestureDatabase
from settings import MultistrokeSettingsContainer


class MainMenu(GridLayout):
    # Layout defined in multistroke.kv.
    pass


class MultistrokeAppSettings(MultistrokeSettingsContainer):
    # Layout defined in multistroke.kv.
    pass


class MultistrokeApp(App):
    """Main application: wires a GestureSurface to a Recognizer and hosts
    the surface/history/database/settings screens."""

    def goto_database_screen(self, *l):
        self.database.import_gdb()
        self.manager.current = 'database'

    def handle_gesture_cleanup(self, surface, g, *l):
        # Remove the result label (if any) when the gesture is cleaned up.
        if hasattr(g, '_result_label'):
            surface.remove_widget(g._result_label)

    def handle_gesture_discard(self, surface, g, *l):
        # Don't bother creating Label if it's not going to be drawn
        if surface.draw_timeout == 0:
            return

        text = '[b]Discarded:[/b] Not enough input'
        g._result_label = Label(text=text, markup=True, size_hint=(None, None),
                                center=(g.bbox['minx'], g.bbox['miny']))
        self.surface.add_widget(g._result_label)

    def METHOD_NAME(self, surface, g, *l):
        # Kick off (asynchronous) recognition; results are handled in
        # handle_recognize_complete via the on_complete event.
        result = self.recognizer.recognize(g.get_vectors())
        result._gesture_obj = g
        result.bind(on_complete=self.handle_recognize_complete)

    def handle_recognize_complete(self, result, *l):
        self.history.add_recognizer_result(result)

        # Don't bother creating Label if it's not going to be drawn
        if self.surface.draw_timeout == 0:
            return

        best = result.best
        if best['name'] is None:
            text = '[b]No match[/b]'
        else:
            text = 'Name: [b]%s[/b]\nScore: [b]%f[/b]\nDistance: [b]%f[/b]' % (
                   best['name'], best['score'], best['dist'])

        g = result._gesture_obj
        g._result_label = Label(text=text, markup=True, size_hint=(None, None),
                                center=(g.bbox['minx'], g.bbox['miny']))
        self.surface.add_widget(g._result_label)

    def build(self):
        # Setting NoTransition breaks the "history" screen! Possibly related
        # to some inexplicable rendering bugs on my particular system
        self.manager = ScreenManager(transition=SlideTransition(
            duration=.15))

        self.recognizer = Recognizer()

        # Setup the GestureSurface and bindings to our Recognizer
        surface = GestureSurface(line_width=2, draw_bbox=True,
                                 use_random_color=True)
        surface_screen = Screen(name='surface')
        surface_screen.add_widget(surface)
        self.manager.add_widget(surface_screen)

        surface.bind(on_gesture_discard=self.handle_gesture_discard)
        surface.bind(on_gesture_complete=self.METHOD_NAME)
        surface.bind(on_gesture_cleanup=self.handle_gesture_cleanup)
        self.surface = surface

        # History is the list of gestures drawn on the surface
        history = GestureHistoryManager()
        history_screen = Screen(name='history')
        history_screen.add_widget(history)
        self.history = history
        self.manager.add_widget(history_screen)

        # Database is the list of gesture templates in Recognizer
        database = GestureDatabase(recognizer=self.recognizer)
        database_screen = Screen(name='database')
        database_screen.add_widget(database)
        self.database = database
        self.manager.add_widget(database_screen)

        # Settings screen
        app_settings = MultistrokeAppSettings()
        ids = app_settings.ids

        ids.max_strokes.bind(value=surface.setter('max_strokes'))
        ids.temporal_win.bind(value=surface.setter('temporal_window'))
        ids.timeout.bind(value=surface.setter('draw_timeout'))
        ids.line_width.bind(value=surface.setter('line_width'))
        ids.draw_bbox.bind(value=surface.setter('draw_bbox'))
        ids.use_random_color.bind(value=surface.setter('use_random_color'))

        settings_screen = Screen(name='settings')
        settings_screen.add_widget(app_settings)
        self.manager.add_widget(settings_screen)

        # Wrap in a gridlayout so the main menu is always visible
        layout = GridLayout(cols=1)
        layout.add_widget(self.manager)
        layout.add_widget(MainMenu())
        return layout


if __name__ in ('__main__', '__android__'):
    MultistrokeApp().run()
5,617
wrapper
""" Simulation of ADF z-test critical values. Closely follows MacKinnon (2010). Running this files requires an IPython cluster, which is assumed to be on the local machine. This can be started using a command similar to ipcluster start -n 4 Remote clusters can be used by modifying the Client initiation. This version has been optimized for execution on a large cluster and should scale well with 128 or more engines. """ from __future__ import annotations import datetime import time from typing import cast from ipyparallel import Client, DirectView from numpy import array, nan, ndarray, percentile, savez from arch.typing import UnitRootTrend from .adf_simulation import adf_simulation # Time in seconds to sleep before checking if ready SLEEP = 10 # Number of repetitions EX_NUM = 500 # Number of simulations per exercise EX_SIZE = 200000 # Approximately controls memory use, in MiB MAX_MEMORY_SIZE = 100 rc = Client() dview = rc.direct_view() with dview.sync_imports(): from numpy import arange, zeros from numpy.random import RandomState def clear_cache(client: Client, view: DirectView) -> None: """Cache-clearing function from mailing list""" assert not rc.outstanding, "don't clear history when tasks are outstanding" client.purge_results("all") # clears controller client.results.clear() client.metadata.clear() view.results.clear() client.history = [] view.history = [] client.session.digest_history.clear() def METHOD_NAME(n: int, trend: UnitRootTrend, b: int, rng_seed: int = 0) -> ndarray: """ Wraps and blocks the main simulation so that the maximum amount of memory can be controlled on multi processor systems when executing in parallel """ rng = RandomState() rng.seed(rng_seed) remaining = b res = zeros(b) finished = 0 block_size = int(2**20.0 * MAX_MEMORY_SIZE / (8.0 * n)) for _ in range(0, b, block_size): if block_size < remaining: count = block_size else: count = remaining st = finished en = finished + count res[st:en] = adf_simulation(n, trend, count, rng) finished += 
count remaining -= count return res # Push variables and functions to all engines dview.execute("import numpy as np") dview["MAX_MEMORY_SIZE"] = MAX_MEMORY_SIZE dview["wrapper"] = METHOD_NAME dview["adf_simulation"] = adf_simulation lview = rc.load_balanced_view() trends = ("n", "c", "ct", "ctt") T = array( ( 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400, 450, 500, 600, 700, 800, 900, 1000, 1200, 1400, 2000, ) ) T = T[::-1] m = T.shape[0] percentiles = list(arange(0.5, 100.0, 0.5)) rng = RandomState(0) seeds = list(rng.random_integers(0, 2**31 - 2, size=EX_NUM)) for tr in trends: results = cast(ndarray, zeros((len(percentiles), m, EX_NUM)) * nan) filename = "adf_z_" + tr + ".npz" for i, t in enumerate(T): print(f"Time series length {t} for Trend {tr}") now = datetime.datetime.now() # Serial version # args = ([t] * EX_NUM, [tr] * EX_NUM, [EX_SIZE] * EX_NUM, seeds) # out = [ wrapper(a, b, c, d) for a, b, c, d in zip(*args)] # Parallel version res = lview.map_async( METHOD_NAME, [t] * EX_NUM, [tr] * EX_NUM, [EX_SIZE] * EX_NUM, seeds ) sleep_count = 0 while not res.ready(): sleep_count += 1 elapsed = datetime.datetime.now() - now if sleep_count % 10: print(f"Elapsed time {elapsed}, waiting for results") time.sleep(SLEEP) out = res.get() # Prevent unnecessary results from accumulating clear_cache(rc, lview) elapsed = datetime.datetime.now() - now print(f"Total time {elapsed} for T={t}") quantiles = [percentile(x, percentiles) for x in out] results[:, i, :] = cast(ndarray, array(quantiles).T) savez(filename, trend=tr, results=results, percentiles=percentiles, T=T)
5,618
visualize
import fcntl
import os
import time
import threading

from binaryninja import PluginCommand, HighlightStandardColor, log, BackgroundTaskThread
from binaryninja.interaction import (
    get_open_filename_input,
    get_directory_name_input,
    get_choice_input,
)
import binaryninja.enums as enums

# Highlight colors used when painting trace coverage.
blue = HighlightStandardColor.BlueHighlightColor
black = HighlightStandardColor.BlackHighlightColor
white = HighlightStandardColor.WhiteHighlightColor
clear = HighlightStandardColor.NoHighlightColor

# renew interval: seconds between re-reads of the trace in live mode
interval = 3


class Singleton(type):
    """Metaclass that hands out a single shared instance per class."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]


# BUG FIX: the original declared ``__metaclass__ = Singleton`` inside the class
# body, which is Python 2 syntax and has no effect under Python 3, so the class
# was never actually a singleton.  The plugin commands below rely on singleton
# behavior (they re-construct ``TraceVisualizer(view, None)`` and expect to get
# the instance that already holds the workspace/highlight state), so the
# metaclass must be applied with the Python 3 ``metaclass=`` keyword.
class TraceVisualizer(metaclass=Singleton):
    """Highlights basic blocks covered by a Manticore execution trace."""

    def __init__(self, view, workspace, base=0x0, live=False):
        self.view = view
        self.workspace = workspace
        self.base = base
        # highlighted addresses
        self.highlighted = set()
        # covered basic blocks
        self.cov_bb = set()
        # comments inserted
        self.cov_comments = set()
        self.current_function = None
        self.live_update = live

    def METHOD_NAME(self):
        """
        Given a Manticore workspace, or trace file, highlight the basic blocks.
        """
        if os.path.isfile(self.workspace):
            worker = self.highlight_from_file
        elif os.path.isdir(self.workspace):
            worker = self.highlight_from_dir
        else:
            # BUG FIX: the original left ``t`` unbound here, so an invalid
            # workspace path raised a NameError on ``t.start()``.
            log.log_error(
                "ManticoreTrace: workspace is neither a file nor a directory: "
                + repr(self.workspace)
            )
            return
        t = threading.Thread(target=worker, args=(self.workspace,))
        t.start()

    def highlight_from_file(self, tracefile):
        """Highlight from a single trace file, re-reading it while live."""
        while True:
            self.process_trace(tracefile)
            if not self.live_update:
                break
            time.sleep(interval)

    def highlight_from_dir(self, workspace_dir):
        """Highlight from every ``*trace`` file in a workspace directory."""
        while True:
            for f in os.listdir(workspace_dir):
                if f.endswith("trace"):
                    self.process_trace(os.path.join(workspace_dir, f))
            if not self.live_update:
                break
            time.sleep(interval)

    def process_trace(self, tracefile):
        """Parse one trace file and highlight any addresses not yet seen."""
        trace_addr = set()
        with open(tracefile, "r") as f:
            for line in f:
                # Trace entries are absolute; rebase them onto the loaded view.
                trace_addr.add(int(line.strip(), 0) - self.base)
        for addr in trace_addr - self.highlighted:
            self.highlight_addr(addr, blue)

    def highlight_addr(self, addr, hl):
        """Highlight the basic block containing ``addr`` and record coverage."""
        blocks = self.view.get_basic_blocks_at(addr)
        if blocks:
            blocks[0].set_user_highlight(hl)
            if self.live_update and blocks[0].function != self.current_function:
                # Follow execution in the UI when in live mode.
                self.current_function = blocks[0].function
                self.view.file.navigate(self.view.file.view, blocks[0].start)
            self.highlighted.add(addr)
            self.cov_bb.add(blocks[0].start)
            if self.live_update:
                # Briefly flash the instruction so live progress is visible.
                blocks[0].function.set_auto_instr_highlight(addr, white)
                time.sleep(0.1)
                blocks[0].function.set_auto_instr_highlight(addr, clear)

    def highlight_block(self, addr, hl):
        """Apply highlight ``hl`` to every basic block containing ``addr``."""
        blocks = self.view.get_basic_blocks_at(addr)
        for b in blocks:
            b.set_user_highlight(hl)

    def set_comment_at_xref(self, xref, comment):
        """Attach ``comment`` at an xref site, but only for control-flow IL."""
        try:
            op = xref.function.get_lifted_il_at(xref.address).operation
        except IndexError:
            w = "ManticoreTrace: Could not lookup " + hex(xref.address)
            w += " address for function " + str(xref.function)
            log.log_warn(w)
            return
        if not (
            op == enums.LowLevelILOperation.LLIL_CALL
            or op == enums.LowLevelILOperation.LLIL_JUMP
            or op == enums.LowLevelILOperation.LLIL_JUMP_TO
            or op == enums.LowLevelILOperation.LLIL_SYSCALL
            or op == enums.LowLevelILOperation.LLIL_GOTO
        ):
            return
        self.cov_comments.add((xref.function, xref.address))
        xref.function.set_comment_at(xref.address, comment)

    def clear_stats(self):
        """Reset coverage bookkeeping and remove every comment we added."""
        self.highlighted.clear()
        self.cov_bb.clear()
        for fun, addr in self.cov_comments:
            fun.set_comment_at(addr, None)
        # BUG FIX: drop the bookkeeping entries once the comments are removed;
        # the original kept them forever and would re-touch stale pairs.
        self.cov_comments.clear()


class CoverageHelper(BackgroundTaskThread):
    """Background task that computes cumulative BB coverage per function."""

    def __init__(self, view, tv):
        self.tv = tv
        self.view = view
        BackgroundTaskThread.__init__(self, "Calculating Coverage", True)

    def run(self):
        # function cumulative bb coverage
        # key: function address
        # values: [total basic blocks covered, xrefs to function]
        fun_cov = {f.start: [0, 0] for f in self.view.functions}
        fun_xrefs = sorted(
            [(f, self.view.get_code_refs(f.start)) for f in self.view.functions],
            key=lambda x: len(x[1]),
        )
        for f, xrefs in fun_xrefs:
            if not f.basic_blocks:
                continue
            cov = len(
                (set([b.start for b in f.basic_blocks]).intersection(self.tv.cov_bb))
            ) / float(len(set(f.basic_blocks)))
            fun_cov[f.start][0] += cov
            # Propagate this function's coverage up to each of its callers.
            for xref_f in xrefs:
                fun_cov[xref_f.function.start][0] += cov
                fun_cov[xref_f.function.start][1] += 1
        for f, xrefs in fun_xrefs:
            # +1 accounts for the function itself in addition to its callers.
            cov = str((fun_cov[f.start][0] * 100.0) / (fun_cov[f.start][1] + 1))
            cov += "% cumulative BB coverage"
            f.set_comment(f.start, "Function Stats: \n" + cov)
            for xref in xrefs:
                self.tv.set_comment_at_xref(xref, cov)


def get_workspace():
    """Prompt for a trace file or a Manticore workspace directory."""
    choice = get_choice_input("Select Trace Type", "Input", ["Trace File", "Manticore Workspace"])
    if choice == 0:
        workspace = get_open_filename_input("Trace File")
    else:
        workspace = get_directory_name_input("Workspace Directory")
    return workspace


def viz_trace(view):
    """
    Given a Manticore trace file, highlight the basic blocks.
    """
    tv = TraceVisualizer(view, None)
    if tv.workspace is None:
        tv.workspace = get_workspace()
    tv.METHOD_NAME()


def viz_live_trace(view):
    """
    Given a Manticore trace file, highlight the basic blocks.
    """
    tv = TraceVisualizer(view, None, live=True)
    if tv.workspace is None:
        tv.workspace = get_workspace()
    # update due to singleton in case we are called after a clear
    tv.live_update = True
    tv.METHOD_NAME()


def get_coverage(view):
    """Highlight a trace, then compute per-function coverage comments."""
    tv = TraceVisualizer(view, None, live=False)
    if tv.workspace is None:
        tv.workspace = get_workspace()
    tv.METHOD_NAME()
    c = CoverageHelper(view, tv)
    c.start()


def clear_all(view):
    """Remove all highlights/comments and forget the selected workspace."""
    tv = TraceVisualizer(view, None)
    for addr in tv.highlighted:
        tv.highlight_block(addr, clear)
    tv.clear_stats()
    tv.workspace = None
    tv.live_update = False


PluginCommand.register(
    "ManticoreTrace: Highlight", "Highlight Manticore Execution Trace", viz_trace
)
PluginCommand.register(
    "ManticoreTrace: BB Coverage", "Compute cumulative BB coverage for each function ", get_coverage
)
PluginCommand.register(
    "ManticoreTrace: Live Highlight",
    "Highlight Manticore Execution Trace at Real-Time",
    viz_live_trace,
)
PluginCommand.register("ManticoreTrace: Clear", "Clear Manticore Trace Highlight", clear_all)
5,619
items
# Copyright (c) 2005 The Regents of The University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
__all__ = ["multidict"]


class multidict(object):
    """A dict-like mapping that layers local key/value pairs over a parent
    mapping.

    Lookups fall back to the parent; writes and deletes only affect the local
    layer.  Deleting a key that exists only in the parent records a tombstone
    in ``deleted`` so the key appears removed without touching the parent.
    """

    def __init__(self, parent=None, **kwargs):
        # BUG FIX: the original used a mutable default argument (parent={});
        # use a None sentinel so instances never share the default mapping.
        self.local = dict(**kwargs)
        self.parent = {} if parent is None else parent
        self.deleted = {}  # tombstones for keys deleted from the parent view

    def __str__(self):
        return str(dict(self.METHOD_NAME()))

    def __repr__(self):
        return repr(dict(list(self.METHOD_NAME())))

    def __contains__(self, key):
        # BUG FIX: the original ignored tombstones, so a key deleted via
        # __delitem__ still reported as present even though __getitem__ raised.
        if key in self.local:
            return True
        return not self.deleted.get(key, False) and key in self.parent

    def __delitem__(self, key):
        try:
            del self.local[key]
        except KeyError:
            if key in self.parent:
                self.deleted[key] = True
            else:
                raise KeyError(key)

    def __setitem__(self, key, value):
        # Setting a key resurrects it if it was previously tombstoned.
        self.deleted.pop(key, False)
        self.local[key] = value

    def __getitem__(self, key):
        try:
            return self.local[key]
        except KeyError:
            if not self.deleted.get(key, False) and key in self.parent:
                return self.parent[key]
            else:
                raise KeyError(key)

    def __len__(self):
        # BUG FIX: the original returned len(local) + len(parent), which
        # double-counted shadowed keys and counted deleted ones; count the
        # keys actually visible through iteration instead.
        return sum(1 for _ in self.next())

    def next(self):
        """Yield the (key, value) pairs visible through this multidict."""
        for key, value in self.local.items():
            yield key, value
        if self.parent:
            # BUG FIX: support plain-dict parents; the original called
            # ``self.parent.next()`` unconditionally, which only exists on
            # multidict parents and raised AttributeError otherwise.
            if isinstance(self.parent, multidict):
                parent_items = self.parent.next()
            else:
                parent_items = iter(self.parent.items())
            for key, value in parent_items:
                if key not in self.local and key not in self.deleted:
                    yield key, value

    def has_key(self, key):
        return key in self

    def METHOD_NAME(self):
        """Iterate the visible (key, value) pairs (dict.items() analogue)."""
        yield from self.next()

    def keys(self):
        for key, value in self.next():
            yield key

    def values(self):
        for key, value in self.next():
            yield value

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def setdefault(self, key, default):
        try:
            return self[key]
        except KeyError:
            self.deleted.pop(key, False)
            self.local[key] = default
            return default

    def _dump(self):
        # Debug helper: print each layer's local dict from child to root.
        print("multidict dump")
        node = self
        while isinstance(node, multidict):
            print("    ", node.local)
            node = node.parent

    def _dumpkey(self, key):
        # Debug helper: show every value of ``key`` along the parent chain.
        values = []
        node = self
        while isinstance(node, multidict):
            if key in node.local:
                values.append(node.local[key])
            node = node.parent
        print(key, values)


if __name__ == "__main__":
    # Ad-hoc smoke demo exercising shadowing, deletion and setdefault.
    test1 = multidict()
    test2 = multidict(test1)
    test3 = multidict(test2)
    test4 = multidict(test3)

    test1["a"] = "test1_a"
    test1["b"] = "test1_b"
    test1["c"] = "test1_c"
    test1["d"] = "test1_d"
    test1["e"] = "test1_e"
    test2["a"] = "test2_a"
    del test2["b"]
    test2["c"] = "test2_c"
    del test1["a"]
    test2.setdefault("f", multidict)

    print("test1>", list(test1.METHOD_NAME()))
    print("test2>", list(test2.METHOD_NAME()))
    # print(test1['a'])
    print(test1["b"])
    print(test1["c"])
    print(test1["d"])
    print(test1["e"])
    print(test2["a"])
    # print(test2['b'])
    print(test2["c"])
    print(test2["d"])
    print(test2["e"])

    for key in test2.keys():
        print(key)

    test2.get("g", "foo")
    # test2.get('b')
    test2.get("b", "bar")
    test2.setdefault("b", "blah")
    print(test1)
    print(test2)
    print(repr(test2))
    print(len(test2))
    test3["a"] = [0, 1, 2, 3]
    print(test4)
5,620
test time freq resolution
# Copyright (C) 2022 ASTRON (Netherlands Institute for Radio Astronomy)
# SPDX-License-Identifier: GPL-3.0-or-later

import pytest
import shutil
import os
import sys
import uuid
from subprocess import check_call

# Append current directory to system path in order to import testconfig
sys.path.append(".")

import testconfig as tcf
from utils import assert_taql, untar_ms

"""
Tests for applying the beam model.

Script can be invoked in two ways:
- as standalone from the build/steps/test/integration directory,
  using `pytest source/tDemix.py` (extended with pytest options of your choice)
- using ctest, see DP3/steps/test/integration/CMakeLists.txt
"""

MSIN = "tDemix.in_MS"
CWD = os.getcwd()

common_args = [
    "msin=tDemix_tmp/tDemix.MS",
    "msout=tDemix_out.MS",
    "msout.overwrite=True",
    "msout.tilesize=1",
    "msin.datacolumn=DATA",
    "msout.datacolumn=DATA",
    "steps=[demix]",
    "demix.type=demixer",
    "demix.corrtype=cross",
    "demix.baseline='CS00[0-9]HBA0&'",
    "demix.demixfreqstep=64",
    "demix.demixtimestep=10",
    "demix.instrumentmodel='tDemix_tmp/instrument'",
    "demix.subtractsources=[CasA]",
]

skymodel_arg = "demix.skymodel='tDemix_tmp/{}'"


def _assert_output_matches_reference(reference_ms):
    """Assert, via TaQL, that tDemix_out.MS equals tDemix_tmp/<reference_ms>.

    The query selects rows whose DATA (within 1e-3, NaNs compare equal), FLAG,
    WEIGHT_SPECTRUM, antenna or time columns differ; ``assert_taql`` requires
    the selection to be empty.  Factored out because the original repeated
    this long query (as placeholder-less f-strings) in every test.
    """
    taql_command = (
        f"select from tDemix_out.MS t1, tDemix_tmp/{reference_ms} t2 where "
        "not all(near(t1.DATA,t2.DATA,1e-3) || (isnan(t1.DATA) && isnan(t2.DATA))) "
        "|| not all(t1.FLAG = t2.FLAG) "
        "|| not all(near(t1.WEIGHT_SPECTRUM, t2.WEIGHT_SPECTRUM)) "
        "|| t1.ANTENNA1 != t2.ANTENNA1 || t1.ANTENNA2 != t2.ANTENNA2 "
        "|| t1.TIME !~= t2.TIME"
    )
    assert_taql(taql_command)


@pytest.fixture(autouse=True)
def source_env():
    """Run each test in a throwaway directory with the unpacked MS/sourcedb."""
    os.chdir(CWD)
    tmpdir = str(uuid.uuid4())
    os.mkdir(tmpdir)
    os.chdir(tmpdir)
    untar_ms(f"{tcf.RESOURCEDIR}/{MSIN}.tgz")
    check_call(
        [
            tcf.MAKESOURCEDBEXE,
            "in=tDemix_tmp/sky.txt",
            "out=tDemix_tmp/sourcedb",
        ]
    )

    # Tests are executed here
    yield

    # Post-test: clean up
    os.chdir(CWD)
    shutil.rmtree(tmpdir)


@pytest.mark.parametrize("skymodel", ["sky.txt", "sourcedb"])
def test_without_target(skymodel):
    check_call(
        [
            tcf.DP3EXE,
            "demix.ignoretarget=true",
            "demix.freqstep=64",
            "demix.timestep=10",
            skymodel_arg.format(skymodel),
        ]
        + common_args
    )

    # Compare some columns of the output MS with the reference output.
    _assert_output_matches_reference("tDemix_ref1.MS")


@pytest.mark.parametrize("skymodel", ["sky.txt", "sourcedb"])
def test_with_target_projected_away(skymodel):
    check_call(
        [
            tcf.DP3EXE,
            "demix.ignoretarget=false",
            "demix.freqstep=64",
            "demix.timestep=10",
            skymodel_arg.format(skymodel),
        ]
        + common_args
    )

    # Compare some columns of the output MS with the reference output.
    _assert_output_matches_reference("tDemix_ref2.MS")


@pytest.mark.parametrize("skymodel", ["sky.txt", "sourcedb"])
def test_with_target(skymodel):
    check_call(
        [
            tcf.DP3EXE,
            "demix.targetsource=CIZA.SP1A.FITS.pbcor_patch_s537",
            "demix.freqstep=32",
            "demix.timestep=5",
            "demix.maxiter=100",
            skymodel_arg.format(skymodel),
        ]
        + common_args
    )

    # Compare some columns of the output MS with the reference output.
    _assert_output_matches_reference("tDemix_ref3.MS")


def METHOD_NAME():
    # Same as test_without_target, but the averaging is specified as a
    # time/frequency resolution instead of in steps.
    check_call(
        [
            tcf.DP3EXE,
            "demix.ignoretarget=true",
            "demix.freqstep=64",
            "demix.timestep=10",
            "demix.demixfreqresolution=200kHz",
            "demix.demixtimeresolution=10.0139",
            "demix.skymodel=tDemix_tmp/sky.txt",
        ]
        + common_args
    )

    # Compare some columns of the output MS with the reference output.
    _assert_output_matches_reference("tDemix_ref1.MS")
5,621
resource path summary provider
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ast
import json

"""
LLDB type summary providers for common Firestore types.

This is primarily useful for debugging Firestore internals.

It will add useful summaries and consolidate common types in a way that makes
them easier to observe in the debugger.

Use this by adding the following to your ~/.lldbinit file:

    command script import ~/path/to/firebase-ios-sdk/scripts/lldb/firestore.py

Most of this implementation is based on "Variable Formatting" in the LLDB
online manual: https://lldb.llvm.org/use/variable.html. There are two major
features we're making use of:

  * Summary Providers: these are classes or functions that take an object and
    produce a (typically one line) summary of the type

  * Synthetic Children Providers: these are classes that provide an alternative
    view of the data. The children that are synthesized here show up in the
    graphical debugger.
"""


class ForwardingSynthProvider(object):
  """A synthetic child provider that forwards all methods to another provider.

  Override the `delegate` method to customize the target to which this
  forwards.
  """

  def __init__(self, value, params):
    # value is the lldb.SBValue this provider presents children for.
    self.value = value

  def delegate(self):
    # Subclasses override this to return the SBValue to forward to.
    return self.value

  def has_children(self):
    return self.delegate().MightHaveChildren()

  def num_children(self):
    return self.delegate().GetNumChildren()

  def get_child_index(self, name):
    return self.delegate().GetIndexOfChildWithName(name)

  def get_child_at_index(self, index):
    return self.delegate().GetChildAtIndex(index)

  def update(self):
    # No additional state so nothing needs updating when the value changes.
    pass


# Abseil

class AbseilOptional_SynthProvider(object):
  """A synthetic child provider that hides the internals of absl::optional.
  """

  def __init__(self, value, params):
    self.value = value
    # Computed in update(): the engaged_ flag and (if engaged) the data_ child.
    self.engaged = None
    self.data = None

  def update(self):
    # Unwrap all the internal optional_data and similar types
    value = self.value
    while True:
      if value.GetNumChildren() <= 0:
        break

      child = value.GetChildAtIndex(0)
      if not child.IsValid():
        break

      if 'optional_internal' not in child.GetType().GetName():
        break

      value = child

    # value should now point to the innermost absl::optional container type.
    self.engaged = value.GetChildMemberWithName('engaged_')
    if self.has_children():
      self.data = value.GetChildMemberWithName('data_')
    else:
      self.data = None

  def has_children(self):
    return self.engaged.GetValueAsUnsigned(0) != 0

  def num_children(self):
    # engaged_ is always shown; data_ only when the optional holds a value.
    return 2 if self.has_children() else 1

  def get_child_index(self, name):
    if name == 'engaged_':
      return 0
    if name == 'data_':
      return 1
    return -1

  def get_child_at_index(self, index):
    if index == 0:
      return self.engaged
    if index == 1:
      return self.data


def AbseilOptional_SummaryProvider(value, params):
  # Operates on the synthetic children above, calling has_children.
  return 'engaged={0}'.format(format_bool(value.MightHaveChildren()))


# model

class DatabaseId_SynthProvider(ForwardingSynthProvider):
  """Makes DatabaseId behave as if `*rep_` were inline, hiding its
  `shared_ptr<Rep>` implementation details.
  """

  def delegate(self):
    return deref_shared(self.value.GetChildMemberWithName('rep_'))


def DatabaseId_SummaryProvider(value, params):
  # Operates on the result of the SynthProvider; value is *rep_.
  parts = [
      get_string(value.GetChildMemberWithName('project_id')),
      get_string(value.GetChildMemberWithName('database_id'))
  ]
  return format_string('/'.join(parts))


def DocumentKey_SummaryProvider(value, params):
  """Summarizes DocumentKey as if path_->segments_ were inline and a single,
  slash-delimited string like `"users/foo"`.
  """
  return deref_shared(value.GetChildMemberWithName('path_')).GetSummary()


def METHOD_NAME(value, params):
  """Summarizes ResourcePath as if segments_ were a single string,
  slash-delimited string like `"users/foo"`.
  """
  segments = value.GetChildMemberWithName('segments_')
  segment_text = [get_string(child) for child in segments]
  return format_string('/'.join(segment_text))


# api

def DocumentReference_SummaryProvider(value, params):
  """Summarizes DocumentReference as a single slash-delimited string like
  `"users/foo"`.
  """
  return value.GetChildMemberWithName('key_').GetSummary()


def DocumentSnapshot_SummaryProvider(value, params):
  """Summarizes DocumentSnapshot as a single slash-delimited string like
  `"users/foo"` that names the path of the document in the snapshot.
  """
  return value.GetChildMemberWithName('internal_key_').GetSummary()


# Objective-C

def FIRDocumentReference_SummaryProvider(value, params):
  # Forwards to the wrapped C++ DocumentReference's summary.
  return value.GetChildMemberWithName('_documentReference').GetSummary()


def FIRDocumentSnapshot_SummaryProvider(value, params):
  # Forwards to the wrapped C++ DocumentSnapshot's summary.
  return value.GetChildMemberWithName('_snapshot').GetSummary()


def get_string(value):
  """Returns a Python string from the underlying LLDB SBValue."""
  # TODO(wilhuff): Actually use the SBData API to get this.
  # Get the summary as a C literal and parse it (for now). Using the SBData
  # API would allow this to directly read the string contents.
  summary = value.GetSummary()
  return ast.literal_eval(summary)


def format_string(string):
  """Formats a Python string as a C++ string literal."""
  # JSON and C escapes work the ~same.
  return json.dumps(string)


def format_bool(value):
  """Formats a Python value as a C++ bool literal."""
  return 'true' if value else 'false'


def deref_shared(value):
  """Dereference a shared_ptr."""
  # __ptr_ is libc++'s internal raw-pointer member of std::shared_ptr.
  return value.GetChildMemberWithName('__ptr_').Dereference()


def __lldb_init_module(debugger, params):
  # Entry point lldb calls on `command script import`; registers all the
  # providers above into a single "firestore" type category.
  def run(command):
    debugger.HandleCommand(command)

  def add_summary(provider, typename, *args):
    args = ' '.join(args)
    run('type summary add -w firestore -F {0} {1} {2}'.format(
        qname(provider), args, typename))

  def add_synthetic(provider, typename, *args):
    args = ' '.join(args)
    run('type synthetic add -l {0} -w firestore {1} {2}'.format(
        qname(provider), args, typename))

  # -x: typename is a regex matching any instantiation of absl::optional.
  optional_matcher = '-x absl::[^:]*::optional<.*>'
  add_summary(AbseilOptional_SummaryProvider, optional_matcher, '-e')
  add_synthetic(AbseilOptional_SynthProvider, optional_matcher)

  api = 'firebase::firestore::api::'
  add_summary(DocumentReference_SummaryProvider, api + 'DocumentReference')
  add_summary(DocumentSnapshot_SummaryProvider, api + 'DocumentSnapshot', '-e')

  model = 'firebase::firestore::model::'
  add_summary(DocumentKey_SummaryProvider, model + 'DocumentKey')
  add_summary(METHOD_NAME, model + 'ResourcePath')
  add_summary(DatabaseId_SummaryProvider, model + 'DatabaseId')
  add_synthetic(DatabaseId_SynthProvider, model + 'DatabaseId')

  add_summary(FIRDocumentReference_SummaryProvider, 'FIRDocumentReference')
  add_summary(FIRDocumentSnapshot_SummaryProvider, 'FIRDocumentSnapshot', '-e')

  run('type category enable firestore')


def qname(fn):
  """Returns the module-qualified name of the given class or function."""
  return '{0}.{1}'.format(__name__, fn.__name__)
5,622
test evaluation runs
# Copyright (c) Recommenders contributors.
# Licensed under the MIT License.

# Smoke tests that execute the Spark example notebooks end-to-end via
# papermill, parameterized with tiny mock datasets so they stay fast.

import sys
import pytest

try:
    import papermill as pm
except ImportError:
    pass  # disable error while collecting tests for non-notebook environments

from recommenders.utils.constants import (
    DEFAULT_RATING_COL,
    DEFAULT_USER_COL,
    DEFAULT_ITEM_COL,
)


# This is a flaky test that can fail unexpectedly
@pytest.mark.flaky(reruns=5, reruns_delay=2)
@pytest.mark.notebooks
@pytest.mark.spark
@pytest.mark.skipif(
    sys.platform == "win32", reason="Takes 1087.56s in Windows, while in Linux 52.51s"
)
def test_als_pyspark_runs(notebooks, output_notebook, kernel_name):
    # Runs the ALS PySpark notebook on the 100-row mock MovieLens dataset.
    notebook_path = notebooks["als_pyspark"]
    pm.execute_notebook(
        notebook_path,
        output_notebook,
        kernel_name=kernel_name,
        parameters=dict(
            MOVIELENS_DATA_SIZE="mock100",
            COL_USER=DEFAULT_USER_COL,
            COL_ITEM=DEFAULT_ITEM_COL,
            COL_RATING=DEFAULT_RATING_COL,
        ),
    )


@pytest.mark.notebooks
@pytest.mark.spark
def test_data_split_runs(notebooks, output_notebook, kernel_name):
    # Runs the data-splitting notebook with its default parameters.
    notebook_path = notebooks["data_split"]
    pm.execute_notebook(notebook_path, output_notebook, kernel_name=kernel_name)


# This is a flaky test that can fail unexpectedly
@pytest.mark.flaky(reruns=5, reruns_delay=3)
@pytest.mark.notebooks
@pytest.mark.spark
@pytest.mark.skipif(
    sys.platform == "win32", reason="Takes 2764.50s in Windows, while in Linux 124.35s"
)
def test_als_deep_dive_runs(notebooks, output_notebook, kernel_name):
    # Runs the ALS deep-dive notebook on the mock dataset.
    notebook_path = notebooks["als_deep_dive"]
    pm.execute_notebook(
        notebook_path,
        output_notebook,
        kernel_name=kernel_name,
        parameters=dict(
            MOVIELENS_DATA_SIZE="mock100",
            COL_USER=DEFAULT_USER_COL,
            COL_ITEM=DEFAULT_ITEM_COL,
            COL_RATING=DEFAULT_RATING_COL,
        ),
    )


# This is a flaky test that can fail unexpectedly
@pytest.mark.flaky(reruns=5, reruns_delay=3)
@pytest.mark.notebooks
@pytest.mark.spark
@pytest.mark.skipif(
    sys.platform == "win32", reason="Takes 583.75s in Windows, while in Linux 71.77s"
)
def METHOD_NAME(notebooks, output_notebook, kernel_name):
    # Runs the Spark evaluation notebook with its default parameters.
    notebook_path = notebooks["evaluation"]
    pm.execute_notebook(notebook_path, output_notebook, kernel_name=kernel_name)


# This is a flaky test that can fail unexpectedly
@pytest.mark.flaky(reruns=5, reruns_delay=2)
@pytest.mark.notebooks
@pytest.mark.spark
def test_evaluation_diversity_runs(notebooks, output_notebook, kernel_name):
    # Runs the diversity-evaluation notebook on the mock dataset.
    notebook_path = notebooks["evaluation_diversity"]
    pm.execute_notebook(
        notebook_path,
        output_notebook,
        kernel_name=kernel_name,
        parameters=dict(
            TOP_K=10,
            MOVIELENS_DATA_SIZE="mock100",
            COL_USER=DEFAULT_USER_COL,
            COL_ITEM=DEFAULT_ITEM_COL,
            COL_RATING=DEFAULT_RATING_COL,
        ),
    )


# This is a flaky test that can fail unexpectedly
@pytest.mark.flaky(reruns=5, reruns_delay=2)
@pytest.mark.notebooks
@pytest.mark.spark
@pytest.mark.skipif(
    sys.platform == "win32", reason="Takes 2409.69s in Windows, while in Linux 138.30s"
)
def test_spark_tuning(notebooks, output_notebook, kernel_name):
    # Runs the ALS hyperparameter-tuning notebook with a reduced search grid.
    notebook_path = notebooks["spark_tuning"]
    pm.execute_notebook(
        notebook_path,
        output_notebook,
        kernel_name=kernel_name,
        parameters=dict(
            MOVIELENS_DATA_SIZE="mock100",
            NUMBER_CORES="1",
            NUMBER_ITERATIONS=3,
            SUBSET_RATIO=0.5,
            RANK=[5, 10],
            REG=[0.1, 0.01],
        ),
    )


@pytest.mark.notebooks
@pytest.mark.spark
@pytest.mark.skipif(sys.platform == "win32", reason="Not implemented on Windows")
def test_mmlspark_lightgbm_criteo_runs(notebooks, output_notebook, kernel_name):
    # Runs the MMLSpark LightGBM Criteo notebook on the sample dataset.
    notebook_path = notebooks["mmlspark_lightgbm_criteo"]
    pm.execute_notebook(
        notebook_path,
        output_notebook,
        kernel_name=kernel_name,
        parameters=dict(DATA_SIZE="sample", NUM_ITERATIONS=10),
    )
5,623
test sb rules
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

# AZURE CLI SERVICEBUS - CRUD TEST DEFINITIONS

import time

from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, live_only)
from knack.util import CLIError

# pylint: disable=line-too-long
# pylint: disable=too-many-lines


class SBRulesCRUDScenarioTest(ScenarioTest):
    """Scenario test covering CRUD of Service Bus subscription rules."""

    from azure.cli.testsdk.scenario_tests import AllowLargeResponse

    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='cli_test_sb_rules')
    def METHOD_NAME(self, resource_group):
        """End-to-end flow: create a namespace, topic and subscription, then
        create/show/update/list/delete SqlFilter and CorrelationFilter rules,
        and finally tear everything down in reverse order."""
        self.kwargs.update({
            'namespacename': self.create_random_name(prefix='sb-nscli', length=20),
            'tags': {'tag1: value1', 'tag2: value2'},
            'sku': 'Standard',
            'tier': 'Standard',
            'authoname': self.create_random_name(prefix='cliAutho', length=20),
            'defaultauthorizationrule': 'RootManageSharedAccessKey',
            'accessrights': 'Listen',
            'primary': 'PrimaryKey',
            'secondary': 'SecondaryKey',
            'topicname': self.create_random_name(prefix='sb-topiccli', length=25),
            'topicauthoname': self.create_random_name(prefix='cliTopicAutho', length=25),
            'subscriptionname': self.create_random_name(prefix='sb-subscli', length=25),
            'rulename': self.create_random_name(prefix='sb-rulecli', length=25),
            'rulename2': self.create_random_name(prefix='sb-rulecli2', length=20),
            'rulename3': self.create_random_name(prefix='sb-rulecli3', length=20),
            'sqlexpression': 'test=test',
            'sqlexpression1': 'test1=test1'
        })

        # Create Namespace
        self.cmd(
            'servicebus namespace create --resource-group {rg} --name {namespacename} --tags {tags} --sku {sku}',
            checks=[self.check('sku.name', '{sku}')])

        # Get Created Namespace
        self.cmd('servicebus namespace show --resource-group {rg} --name {namespacename}',
                 checks=[self.check('sku.name', '{sku}')])

        # Create Topic
        self.cmd('servicebus topic create --resource-group {rg} --namespace-name {namespacename} --name {topicname} ',
                 checks=[self.check('name', '{topicname}')])

        # Get Topic
        self.cmd('servicebus topic show --resource-group {rg} --namespace-name {namespacename} --name {topicname} ',
                 checks=[self.check('name', '{topicname}')])

        # Create Subscription
        self.cmd(
            'servicebus topic subscription create --resource-group {rg} --namespace-name {namespacename} --topic-name {topicname} --name {subscriptionname}',
            checks=[self.check('name', '{subscriptionname}')])

        # Get Create Subscription
        self.cmd(
            'servicebus topic subscription show --resource-group {rg} --namespace-name {namespacename} --topic-name {topicname} --name {subscriptionname}',
            checks=[self.check('name', '{subscriptionname}')])

        # Create Rules (SqlFilter via --filter-sql-expression only)
        rule = self.cmd(
            'servicebus topic subscription rule create --resource-group {rg} --namespace-name {namespacename} --topic-name {topicname} --subscription-name {subscriptionname} --name {rulename3} --filter-sql-expression {sqlexpression}',
            checks=[self.check('name', '{rulename3}')]).get_output_in_json()

        self.assertEqual(rule['filterType'], 'SqlFilter')
        self.assertEqual(rule['sqlFilter']['sqlExpression'], self.kwargs['sqlexpression'])

        # Create Rules (explicit SqlFilter type with SQL preprocessing enabled)
        rule = self.cmd(
            'servicebus topic subscription rule create --resource-group {rg} --namespace-name {namespacename} --topic-name {topicname} --subscription-name {subscriptionname} --name {rulename2} --filter-type SqlFilter --filter-sql-expression {sqlexpression} --enable-sql-preprocessing',
            checks=[self.check('name', '{rulename2}')]).get_output_in_json()

        self.assertEqual(rule['filterType'], 'SqlFilter')
        self.assertEqual(rule['sqlFilter']['sqlExpression'], self.kwargs['sqlexpression'])

        # Create Rules (CorrelationFilter with every correlation property set)
        rule = self.cmd(
            'servicebus topic subscription rule create --resource-group {rg} --namespace-name {namespacename} --topic-name {topicname} --subscription-name {subscriptionname} --name {rulename} --filter-type CorrelationFilter --correlation-id r00012d --label myvalue --message-id mid --reply-to reply --session-id ids --reply-to-session-id hi --content-type hi --to hi --correlation-filter-property key1=value1',
            checks=[self.check('name', '{rulename}')]).get_output_in_json()

        self.assertEqual(rule['filterType'], 'CorrelationFilter')
        self.assertEqual(rule['correlationFilter']['contentType'], 'hi')
        self.assertEqual(rule['correlationFilter']['correlationId'], 'r00012d')
        self.assertEqual(rule['correlationFilter']['label'], 'myvalue')
        self.assertEqual(rule['correlationFilter']['messageId'], 'mid')
        self.assertEqual(rule['correlationFilter']['replyTo'], 'reply')
        self.assertEqual(rule['correlationFilter']['replyToSessionId'], 'hi')
        self.assertEqual(rule['correlationFilter']['sessionId'], 'ids')
        self.assertEqual(rule['correlationFilter']['to'], 'hi')
        self.assertEqual(rule['correlationFilter']['properties'], {'key1': 'value1'})

        # Get Created Rules
        self.cmd(
            'servicebus topic subscription rule show --resource-group {rg} --namespace-name {namespacename} --topic-name {topicname} --subscription-name {subscriptionname} --name {rulename}',
            checks=[self.check('name', '{rulename}')])

        # Update Rules
        rule = self.cmd(
            'servicebus topic subscription rule update --resource-group {rg} --namespace-name {namespacename} --topic-name {topicname} --subscription-name {subscriptionname} --name {rulename3} --filter-sql-expression {sqlexpression1}',
            checks=[self.check('name', '{rulename3}')]).get_output_in_json()

        self.assertEqual(rule['filterType'], 'SqlFilter')
        self.assertEqual(rule['sqlFilter']['sqlExpression'], self.kwargs['sqlexpression1'])

        # Get Rules List By Subscription
        self.cmd(
            'servicebus topic subscription rule list --resource-group {rg} --namespace-name {namespacename} --topic-name {topicname} --subscription-name {subscriptionname}')

        # Delete create rule
        self.cmd(
            'servicebus topic subscription rule delete --resource-group {rg} --namespace-name {namespacename} --topic-name {topicname} --subscription-name {subscriptionname} --name {rulename}')

        # Delete create Subscription
        self.cmd(
            'servicebus topic subscription delete --resource-group {rg} --namespace-name {namespacename} --topic-name {topicname} --name {subscriptionname}')

        # Delete Topic
        self.cmd('servicebus topic delete --resource-group {rg} --namespace-name {namespacename} --name {topicname}')

        # Delete Namespace
        self.cmd('servicebus namespace delete --resource-group {rg} --name {namespacename}')
5,624
test metric is computed correctly partial episode
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for tf_agents.metrics.batched_py_metric."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf  # pylint: disable=g-explicit-tensorflow-version-import

from tf_agents.metrics import batched_py_metric
from tf_agents.metrics import py_metrics
from tf_agents.trajectories import trajectory
from tf_agents.utils import nest_utils


class BatchedPyMetricTest(tf.test.TestCase):

  def setUp(self):
    super(BatchedPyMetricTest, self).setUp()
    # Order of args for trajectory methods:
    # (observation, action, policy_info, reward, discount)
    # Each _tsN is a batch of two trajectories; two episodes per batch entry:
    # episode 1 spans ts0..ts2, episode 2 spans ts3..ts5.
    self._ts0 = nest_utils.stack_nested_arrays([
        trajectory.boundary((), (), (), 0.0, 1.0),
        trajectory.boundary((), (), (), 0.0, 1.0),
    ])
    self._ts1 = nest_utils.stack_nested_arrays([
        trajectory.first((), (), (), 1.0, 1.0),
        trajectory.first((), (), (), 2.0, 1.0),
    ])
    self._ts2 = nest_utils.stack_nested_arrays([
        trajectory.last((), (), (), 3.0, 1.0),
        trajectory.last((), (), (), 4.0, 1.0),
    ])
    self._ts3 = nest_utils.stack_nested_arrays([
        trajectory.boundary((), (), (), 0.0, 1.0),
        trajectory.boundary((), (), (), 0.0, 1.0),
    ])
    self._ts4 = nest_utils.stack_nested_arrays([
        trajectory.first((), (), (), 5.0, 1.0),
        trajectory.first((), (), (), 6.0, 1.0),
    ])
    self._ts5 = nest_utils.stack_nested_arrays([
        trajectory.last((), (), (), 7.0, 1.0),
        trajectory.last((), (), (), 8.0, 1.0),
    ])

  def _average_return(self, *batched_trajectories):
    """Feeds the given batched trajectories, in order, into a fresh
    BatchedPyMetric(AverageReturnMetric) and returns its result.

    Factored out because every test previously repeated the same
    construct-and-call boilerplate.
    """
    metric = batched_py_metric.BatchedPyMetric(py_metrics.AverageReturnMetric)
    for batched_trajectory in batched_trajectories:
      metric(batched_trajectory)
    return metric.result()

  def testMetricIsComputedCorrectlyNoSteps(self):
    # With no data at all the metric reports its default of 0.
    self.assertEqual(self._average_return(), 0)

  def METHOD_NAME(self):
    # Only a boundary and a first step: no episode has completed yet.
    self.assertEqual(self._average_return(self._ts0, self._ts1), 0)

  def testMetricIsComputedCorrectlyOneEpisode(self):
    # One full episode per batch entry: returns 1+3=4 and 2+4=6, mean 5.
    self.assertEqual(
        self._average_return(self._ts0, self._ts1, self._ts2), 5
    )

  def testMetricIsComputedCorrectlyOneAndPartialEpisode(self):
    # A second, incomplete episode must not affect the result.
    self.assertEqual(
        self._average_return(
            self._ts0, self._ts1, self._ts2, self._ts3, self._ts4
        ),
        5,
    )

  def testMetricIsComputedCorrectlyTwoEpisodes(self):
    # Returns: episode 1 -> 4 and 6; episode 2 -> 12 and 14; overall mean 9.
    self.assertEqual(
        self._average_return(
            self._ts0, self._ts1, self._ts2, self._ts3, self._ts4, self._ts5
        ),
        9,
    )

  def testReset(self):
    # After reset() only the second episode (returns 12 and 14) counts.
    batched_avg_return_metric = batched_py_metric.BatchedPyMetric(
        py_metrics.AverageReturnMetric
    )
    batched_avg_return_metric(self._ts0)
    batched_avg_return_metric(self._ts1)
    batched_avg_return_metric(self._ts2)
    batched_avg_return_metric.reset()
    batched_avg_return_metric(self._ts3)
    batched_avg_return_metric(self._ts4)
    batched_avg_return_metric(self._ts5)

    self.assertEqual(batched_avg_return_metric.result(), 13)


if __name__ == '__main__':
  tf.test.main()
5,625
test kv2 raise on deleted
    @parameterized.expand(
        [
            # (label, version[, expected-exception]) — valid versions must map
            # to a concrete client class; unknown versions must be rejected.
            ("v1", "1"),
            ("v2", "2"),
            ("v3", "3", ValueError),
            ("invalid version", "12345", ValueError),
        ]
    )
    def test_default_kv_version_setter(self, test_label, version, raises=False):
        """Verify the Kv.default_kv_version setter accepts only supported versions.

        Valid versions ("1"/"2") should make the corresponding versioned
        accessor return the matching client class; anything else raises.
        """
        # Supported KV engine versions and the hvac class implementing each.
        version_class_map = {
            "1": KvV1,
            "2": KvV2,
        }
        mock_adapter = MagicMock()
        kv = Kv(adapter=mock_adapter)
        if raises:
            # Unsupported version strings must raise (ValueError per params).
            with self.assertRaises(raises):
                kv.default_kv_version = version
        else:
            kv.default_kv_version = version
            # The "v1"/"v2" attribute should expose the right client class.
            self.assertIsInstance(
                obj=getattr(kv, "v%s" % version),
                cls=version_class_map.get(version),
            )
    @pytest.mark.parametrize(
        ("json", "recoverable"),
        [
            # No/empty error payload: nothing to recover from -> must raise.
            (None, False),
            ({}, False),
            # Empty deletion_time means the path truly does not exist -> raise.
            ({"data": {"metadata": {"deletion_time": ""}}}, False),
            # Non-empty deletion_time: secret exists but version was deleted;
            # recoverable when the caller opts out of raising.
            ({"data": {"metadata": {"deletion_time": "anything"}}}, True),
        ],
    )
    @pytest.mark.parametrize("raise_on_del", [True, False])
    def METHOD_NAME(self, raise_on_del, json, recoverable):
        """Verify raise_on_deleted_version behavior for KV v2 reads.

        An InvalidPath from the adapter must propagate unless the response
        payload marks a deleted-but-recoverable version AND the caller passed
        raise_on_deleted_version=False, in which case the payload is returned.
        """
        def _getem(*args, **kwargs):
            # Simulate the adapter failing every GET with the given payload.
            raise exceptions.InvalidPath(json=json)

        mock_adapter = MagicMock(get=_getem)
        kv = Mock(wraps=Kv(adapter=mock_adapter, default_kv_version="2"))
        # Exercise both the version-dispatching wrapper and the v2 client
        # directly, for both read entry points.
        for method in [
            kv.read_secret,
            kv.read_secret_version,
            kv.v2.read_secret,
            kv.v2.read_secret_version,
        ]:
            p = "secret_path"
            should_raise = raise_on_del or not recoverable
            if should_raise:
                with pytest.raises(exceptions.InvalidPath):
                    method(p, raise_on_deleted_version=raise_on_del)
            else:
                r = method(p, raise_on_deleted_version=raise_on_del)
                # The original error payload is returned verbatim (identity).
                assert r is json
5,626
get constraints
from collections.abc import Collection, Iterable, Sequence from typing import Any, ClassVar, Final, TypeVar from django.core.checks.messages import CheckMessage from django.core.exceptions import MultipleObjectsReturned as BaseMultipleObjectsReturned from django.core.exceptions import ObjectDoesNotExist, ValidationError from django.db.models import BaseConstraint, Field from django.db.models.manager import BaseManager, Manager from django.db.models.options import Options from typing_extensions import Self _Self = TypeVar("_Self", bound=Model) class ModelStateFieldsCacheDescriptor: ... class ModelState: db: str | None adding: bool fields_cache: ModelStateFieldsCacheDescriptor class ModelBase(type): @property def _default_manager(cls: type[_Self]) -> BaseManager[_Self]: ... # type: ignore[misc] @property def _base_manager(cls: type[_Self]) -> BaseManager[_Self]: ... # type: ignore[misc] class Model(metaclass=ModelBase): # Note: these two metaclass generated attributes don't really exist on the 'Model' # class, runtime they are only added on concrete subclasses of 'Model'. The # metaclass also sets up correct inheritance from concrete parent models exceptions. # Our mypy plugin aligns with this behaviour and will remove the 2 attributes below # and re-add them to correct concrete subclasses of 'Model' DoesNotExist: Final[type[ObjectDoesNotExist]] MultipleObjectsReturned: Final[type[BaseMultipleObjectsReturned]] # This 'objects' attribute will be deleted, via the plugin, in favor of managing it # to only exist on subclasses it exists on during runtime. objects: ClassVar[Manager[Self]] class Meta: ... _meta: Options[Any] pk: Any _state: ModelState def __init__(self, *args: Any, **kwargs: Any) -> None: ... @classmethod def add_to_class(cls, name: str, value: Any) -> Any: ... @classmethod def from_db(cls, db: str | None, field_names: Collection[str], values: Collection[Any]) -> Self: ... def delete(self, using: Any = ..., keep_parents: bool = ...) 
-> tuple[int, dict[str, int]]: ... async def adelete(self, using: Any = ..., keep_parents: bool = ...) -> tuple[int, dict[str, int]]: ... def full_clean( self, exclude: Iterable[str] | None = ..., validate_unique: bool = ..., validate_constraints: bool = ... ) -> None: ... def clean(self) -> None: ... def clean_fields(self, exclude: Collection[str] | None = ...) -> None: ... def validate_unique(self, exclude: Collection[str] | None = ...) -> None: ... def date_error_message(self, lookup_type: str, field_name: str, unique_for: str) -> ValidationError: ... def unique_error_message(self, model_class: type[Self], unique_check: Sequence[str]) -> ValidationError: ... def validate_constraints(self, exclude: Collection[str] | None = ...) -> None: ... def METHOD_NAME(self) -> list[tuple[type[Model], Sequence[BaseConstraint]]]: ... def save( self, force_insert: bool = ..., force_update: bool = ..., using: str | None = ..., update_fields: Iterable[str] | None = ..., ) -> None: ... async def asave( self, force_insert: bool = ..., force_update: bool = ..., using: str | None = ..., update_fields: Iterable[str] | None = ..., ) -> None: ... def save_base( self, raw: bool = ..., force_insert: bool = ..., force_update: bool = ..., using: str | None = ..., update_fields: Iterable[str] | None = ..., ) -> None: ... def refresh_from_db(self, using: str | None = ..., fields: Sequence[str] | None = ...) -> None: ... async def arefresh_from_db(self, using: str | None = ..., fields: Sequence[str] | None = ...) -> None: ... def serializable_value(self, field_name: str) -> Any: ... def prepare_database_save(self, field: Field) -> Any: ... def get_deferred_fields(self) -> set[str]: ... @classmethod def check(cls, **kwargs: Any) -> list[CheckMessage]: ... def __getstate__(self) -> dict: ... def model_unpickle(model_id: tuple[str, str] | type[Model]) -> Model: ...
5,627
parse forum list page
def get_profile_url(url, username):
    """Build the phpBB member-profile URL for *username* on the given forum.

    The path is forced to ``/memberlist.php`` and the query string is
    rewritten to request profile-view mode, while preserving any query
    fields already present on *url*.
    """
    parts = urlparse.urlparse(url)
    # Keep existing query fields, then append the profile-lookup parameters.
    params = dict(urlparse.parse_qsl(parts.query))
    params["un"] = username
    params["mode"] = "viewprofile"
    rebuilt = parts._replace(path="/memberlist.php", query=urlencode(params))
    return urlparse.urlunparse(rebuilt)
def METHOD_NAME(id, page, out, extra=None):
    """Scrape one page of a Minetest forum listing into *out*.

    Args:
        id: forum id (the ``f=`` query parameter). NOTE(review): shadows the
            ``id`` builtin and is reassigned to each topic id inside the loop.
        page: zero-based page index; 30 topics per page.
        out: dict mapping topic id -> topic row dict; mutated in place.
        extra: optional dict of key/value pairs copied into every row.

    Returns:
        True if the page was fully processed, False when a topic id already
        present in *out* is encountered (i.e. pagination has wrapped around),
        which the caller uses as the stop signal.
    """
    num_per_page = 30
    start = page*num_per_page+1
    print(" - Fetching page {} (topics {}-{})".format(page, start, start+num_per_page))
    url = "https://forum.minetest.net/viewforum.php?f=" + str(id) + "&start=" + str(start)
    r = urllib.request.urlopen(url).read().decode("utf-8")
    soup = BeautifulSoup(r, "html.parser")
    for row in soup.find_all("li", class_="row"):
        classes = row.get("class")
        # Skip pinned/announcement rows — only ordinary topics are collected.
        if "sticky" in classes or "announce" in classes or "global-announce" in classes:
            continue
        topic = row.find("dl")

        # Link info: topic id is extracted from the t= query parameter.
        link = topic.find(class_="topictitle")
        id = regex_id.match(link.get("href")).group(1)
        title = link.find(text=True)

        # Date: phpBB renders e.g. "Mon Jan 02, 2017 13:37".
        left = topic.find(class_="topic-poster")
        date = left.find("time").get_text()
        date = datetime.strptime(date, "%a %b %d, %Y %H:%M")

        links = left.find_all("a")
        if len(links) == 0:
            # No author link (presumably a deleted/guest account) — skip row.
            continue
        author = links[-1].get_text().strip()

        # Get counts (post/view counters as rendered text).
        posts = topic.find(class_="posts").find(text=True)
        views = topic.find(class_="views").find(text=True)

        if id in out:
            # Seen this topic before: pagination wrapped; sanity-check the
            # title matches and tell the caller to stop.
            print(" - got {} again, title: {}".format(id, title))
            assert title == out[id]['title']
            return False

        row = {
            "id" : id,
            "title" : title,
            "author": author,
            "posts" : posts,
            "views" : views,
            "date" : date
        }

        if extra is not None:
            for key, value in extra.items():
                row[key] = value

        out[id] = row

    return True
5,628
on new role
from __future__ import annotations import random import re from typing import Optional from src import status from src.cats import All from src.containers import UserSet, UserDict from src.decorators import command from src.events import Event, event_listener from src.functions import get_all_players, get_target from src.messages import messages from src.roles.helper.wolves import is_known_wolf_ally, register_wolf, send_wolfchat_message from src.status import try_misdirection, try_exchange from src.users import User from src.dispatcher import MessageDispatcher from src.gamestate import GameState register_wolf("doomsayer") SEEN = UserSet() LASTSEEN: UserDict[User, User] = UserDict() KILLS: UserDict[User, User] = UserDict() SICK: UserDict[User, User] = UserDict() LYCANS: UserDict[User, User] = UserDict() _mappings = ("death", KILLS), ("lycan", LYCANS), ("sick", SICK) @command("see", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("doomsayer",)) def see(wrapper: MessageDispatcher, message: str): """Use your paranormal senses to determine a player's doom.""" if wrapper.source in SEEN: wrapper.send(messages["seer_fail"]) return var = wrapper.game_state target = get_target(wrapper, re.split(" +", message)[0], not_self_message="no_see_self") if not target: return if is_known_wolf_ally(var, wrapper.source, target): wrapper.send(messages["no_see_wolf"]) return if LASTSEEN.get(wrapper.source) is target: wrapper.send(messages["no_see_same"]) return target = try_misdirection(var, wrapper.source, target) if try_exchange(var, wrapper.source, target): return mode, mapping = random.choice(_mappings) # keys: doomsayer_death, doomsayer_lycan, doomsayer_sick wrapper.send(messages["doomsayer_{0}".format(mode)].format(target)) mapping[wrapper.source] = target send_wolfchat_message(var, wrapper.source, messages["doomsayer_wolfchat"].format(wrapper.source, target), ("doomsayer",), role="doomsayer", command="see") SEEN.add(wrapper.source) 
@event_listener("new_role")
def METHOD_NAME(evt: Event, var: GameState, player: User, old_role: Optional[str]):
    """Clean up doomsayer state when a player stops being a doomsayer.

    Clears the player's "has seen tonight" flag and drops any pending
    see results (death/lycan/sick) they had queued.
    """
    if old_role == "doomsayer" and evt.data["role"] != "doomsayer":
        SEEN.discard(player)
        for name, mapping in _mappings:
            # NOTE(review): `del mapping[:player:]` is the project's UserDict
            # slice-delete idiom — presumably a "remove if present" that does
            # not raise when the key is absent; confirm against src.containers.
            del mapping[:player:]
KILLS.clear() SICK.clear() LYCANS.clear() @event_listener("get_role_metadata") def on_get_role_metadata(evt: Event, var: Optional[GameState], kind: str): if kind == "role_categories": evt.data["doomsayer"] = {"Wolf", "Wolfchat", "Wolfteam", "Killer", "Nocturnal", "Village Objective", "Wolf Objective"}
5,629
visit ancestorof
# Generated from ECLsubset.g4 by ANTLR 4.10.1 from antlr4 import * if __name__ is not None and "." in __name__: from .ECLsubsetParser import ECLsubsetParser else: from ECLsubsetParser import ECLsubsetParser # This class defines a complete generic visitor for a parse tree produced by ECLsubsetParser. class ECLsubsetVisitor(ParseTreeVisitor): # Visit a parse tree produced by ECLsubsetParser#expressionconstraint. def visitExpressionconstraint( self, ctx: ECLsubsetParser.ExpressionconstraintContext ): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#compoundexpressionconstraint. def visitCompoundexpressionconstraint( self, ctx: ECLsubsetParser.CompoundexpressionconstraintContext ): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#disjunctionexpressionconstraint. def visitDisjunctionexpressionconstraint( self, ctx: ECLsubsetParser.DisjunctionexpressionconstraintContext ): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#exclusionexpressionconstraint. def visitExclusionexpressionconstraint( self, ctx: ECLsubsetParser.ExclusionexpressionconstraintContext ): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#subexpressionconstraint. def visitSubexpressionconstraint( self, ctx: ECLsubsetParser.SubexpressionconstraintContext ): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#eclfocusconcept. def visitEclfocusconcept(self, ctx: ECLsubsetParser.EclfocusconceptContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#eclconceptreference. def visitEclconceptreference(self, ctx: ECLsubsetParser.EclconceptreferenceContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#conceptid. def visitConceptid(self, ctx: ECLsubsetParser.ConceptidContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#constraintoperator. 
def visitConstraintoperator(self, ctx: ECLsubsetParser.ConstraintoperatorContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#descendantof. def visitDescendantof(self, ctx: ECLsubsetParser.DescendantofContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#descendantorselfof. def visitDescendantorselfof(self, ctx: ECLsubsetParser.DescendantorselfofContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#childof. def visitChildof(self, ctx: ECLsubsetParser.ChildofContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#ancestorof. def METHOD_NAME(self, ctx: ECLsubsetParser.AncestorofContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#ancestororselfof. def visitAncestororselfof(self, ctx: ECLsubsetParser.AncestororselfofContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#parentof. def visitParentof(self, ctx: ECLsubsetParser.ParentofContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#disjunction. def visitDisjunction(self, ctx: ECLsubsetParser.DisjunctionContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#exclusion. def visitExclusion(self, ctx: ECLsubsetParser.ExclusionContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#sctid. def visitSctid(self, ctx: ECLsubsetParser.SctidContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#ws. def visitWs(self, ctx: ECLsubsetParser.WsContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#mws. def visitMws(self, ctx: ECLsubsetParser.MwsContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#comment. 
def visitComment(self, ctx: ECLsubsetParser.CommentContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#nonstarchar. def visitNonstarchar(self, ctx: ECLsubsetParser.NonstarcharContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#starwithnonfslash. def visitStarwithnonfslash(self, ctx: ECLsubsetParser.StarwithnonfslashContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#nonfslash. def visitNonfslash(self, ctx: ECLsubsetParser.NonfslashContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#sp. def visitSp(self, ctx: ECLsubsetParser.SpContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#htab. def visitHtab(self, ctx: ECLsubsetParser.HtabContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#cr. def visitCr(self, ctx: ECLsubsetParser.CrContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#lf. def visitLf(self, ctx: ECLsubsetParser.LfContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#qm. def visitQm(self, ctx: ECLsubsetParser.QmContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#bs. def visitBs(self, ctx: ECLsubsetParser.BsContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#digit. def visitDigit(self, ctx: ECLsubsetParser.DigitContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#zero. def visitZero(self, ctx: ECLsubsetParser.ZeroContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#digitnonzero. def visitDigitnonzero(self, ctx: ECLsubsetParser.DigitnonzeroContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#nonwsnonpipe. 
def visitNonwsnonpipe(self, ctx: ECLsubsetParser.NonwsnonpipeContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#anynonescapedchar. def visitAnynonescapedchar(self, ctx: ECLsubsetParser.AnynonescapedcharContext): return self.visitChildren(ctx) # Visit a parse tree produced by ECLsubsetParser#escapedchar. def visitEscapedchar(self, ctx: ECLsubsetParser.EscapedcharContext): return self.visitChildren(ctx) del ECLsubsetParser
5,630
set up
# ========================================================================== # # Copyright NumFOCUS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ==========================================================================*/ import sys import unittest import datetime as dt import itk import numpy as np class TestNumpyVectorContainerMemoryviewInterface(unittest.TestCase): """This tests numpy array <-> ITK VectorContainer conversion.""" def METHOD_NAME(self): pass def test_NumPyBridge_VectorContainer(self): "Try to convert a itk.VectorContainer into a Numpy array and back." if not ( hasattr(itk.VectorContainer, "ULLF") and hasattr(itk.PyVectorContainer, "ULLF") and hasattr(itk.Point, "F3") and hasattr(itk.VectorContainer, "ULLPF3") and hasattr(itk.Point, "F2") and hasattr(itk.VectorContainer, "ULLPF2") ): # There is insufficient wrapping to perform this test; skip it. 
print("Insufficient wrapping to perform itkPyVectorContainerTest") return v1 = itk.VectorContainer[itk.ULL, itk.F].New() v1.Reserve(4) v1.SetElement(0, 1.2) v1.SetElement(1, 2) v1.SetElement(2, 4) v1.SetElement(3, 5) arr = itk.array_view_from_vector_container(v1) assert arr.dtype == np.float32 v2 = itk.vector_container_from_array(arr) self.assertEqual(v1.Size(), arr.shape[0]) self.assertEqual(v1.Size(), v2.Size()) # Compute difference between the original vector and numpy array view diff = 0.0 for ii in range(0, v1.Size()): diff += abs(v1.GetElement(ii) - arr[ii]) self.assertEqual(0, diff) # Compute difference between the two vectors diff = 0.0 for ii in range(0, v1.Size()): diff += abs(v1.GetElement(ii) - v2.GetElement(ii)) self.assertEqual(0, diff) # Test view v1.SetElement(0, 1) self.assertEqual(v1.GetElement(0), arr[0]) # Test deep copy arr_cp = itk.array_from_vector_container(v1) self.assertEqual(v1.GetElement(0), arr_cp[0]) v1.SetElement(0, 0) self.assertNotEqual(v1.GetElement(0), arr_cp[0]) v2_cp = itk.vector_container_from_array(arr_cp) arr_cp[0] = 2 self.assertNotEqual(v2_cp.GetElement(0), arr_cp[0]) PointType = itk.Point[itk.F, 3] v_point = itk.VectorContainer[itk.ULL, PointType].New() v_point.Reserve(2) point = PointType() point[0] = 1.0 point[1] = 2.0 point[2] = 4.0 v_point.SetElement(0, point) point[0] = 6.0 point[1] = 8.0 point[2] = 9.0 v_point.SetElement(1, point) arr = itk.array_view_from_vector_container(v_point) self.assertTrue( np.array_equal(arr, np.array([[1.0, 2.0, 4.0], [6.0, 8.0, 9.0]])) ) PointType = itk.Point[itk.F, 2] v_point = itk.VectorContainer[itk.ULL, PointType].New() v_point.Reserve(2) point = PointType() point[0] = 1.0 point[1] = 2.0 v_point.SetElement(0, point) point[0] = 6.0 point[1] = 8.0 v_point.SetElement(1, point) arr = itk.array_view_from_vector_container(v_point) self.assertTrue(np.array_equal(arr, np.array([[1.0, 2.0], [6.0, 8.0]]))) if __name__ == "__main__": unittest.main()
5,631
saved keys
import ConfigSpace as CS from deephyper.problem import HpProblem from deephyper.search.nas._regevo import RegularizedEvolution class RegularizedEvolutionMixed(RegularizedEvolution): """Extention of the `Regularized evolution <https://arxiv.org/abs/1802.01548>`_ neural architecture search to the case of joint hyperparameter and neural architecture search. Args: problem (NaProblem): Neural architecture search problem describing the search space to explore. evaluator (Evaluator): An ``Evaluator`` instance responsible of distributing the tasks. random_state (int, optional): Random seed. Defaults to None. log_dir (str, optional): Log directory where search's results are saved. Defaults to ".". verbose (int, optional): Indicate the verbosity level of the search. Defaults to 0. population_size (int, optional): the number of individuals to keep in the population. Defaults to 100. sample_size (int, optional): the number of individuals that should participate in each tournament. Defaults to 10. """ def __init__( self, problem, evaluator, random_state: int = None, log_dir: str = ".", verbose: int = 0, population_size: int = 100, sample_size: int = 10, **kwargs, ): super().__init__( problem, evaluator, random_state, log_dir, verbose, population_size, sample_size, ) # Setup na_search_space = self._problem.build_search_space() self.hp_space = self._problem._hp_space # !hyperparameters self.hp_size = len(self.hp_space.space.get_hyperparameter_names()) self.na_space = HpProblem() self.na_space._space.seed(self._random_state.get_state()[1][0]) for i, (low, high) in enumerate(na_search_space.choices()): self.na_space.add_hyperparameter((low, high), name=f"vnode_{i:05d}") self._space = CS.ConfigurationSpace(seed=self._random_state.get_state()[1][0]) self._space.add_configuration_space( prefix="1", configuration_space=self.hp_space.space ) self._space.add_configuration_space( prefix="2", configuration_space=self.na_space.space ) self._space_size = 
len(self._space.get_hyperparameter_names()) def METHOD_NAME(self, job): res = {"arch_seq": str(job.config["arch_seq"])} hp_names = self._problem._hp_space._space.get_hyperparameter_names() for hp_name in hp_names: if hp_name == "loss": res["loss"] = job.config["loss"] else: res[hp_name] = job.config["hyperparameters"][hp_name] return res def _search(self, max_evals, timeout): num_evals_done = 0 # Filling available nodes at start self._evaluator.submit(self._gen_random_batch(size=self._evaluator.num_workers)) # Main loop while max_evals < 0 or num_evals_done < max_evals: # Collecting finished evaluations new_results = list(self._evaluator.gather("BATCH", size=1)) num_received = len(new_results) if num_received > 0: self._population.extend(new_results) self._evaluator.dump_evals( saved_keys=self.METHOD_NAME, log_dir=self._log_dir ) num_evals_done += num_received if num_evals_done >= max_evals: break # If the population is big enough evolve the population if len(self._population) == self._population_size: children_batch = [] # For each new parent/result we create a child from it for _ in range(num_received): # select_sample indexes = self._random_state.choice( self._population_size, self._sample_size, replace=False ) sample = [self._population[i] for i in indexes] # select_parent parent = self._select_parent(sample) # copy_mutate_parent child = self._copy_mutate_arch(parent) # add child to batch children_batch.append(child) # submit_childs self._evaluator.submit(children_batch) else: # If the population is too small keep increasing it new_batch = self._gen_random_batch(size=num_received) self._evaluator.submit(new_batch) def _select_parent(self, sample: list) -> dict: cfg, _ = max(sample, key=lambda x: x[1]) return cfg def _gen_random_batch(self, size: int) -> list: def sample(hp, size): return [hp.sample(self._space.random) for _ in range(size)] batch = [] iterator = zip(*(sample(hp, size) for hp in self._space.get_hyperparameters())) for x in iterator: cfg = 
self._problem.gen_config( list(x[self.hp_size :]), list(x[: self.hp_size]) ) batch.append(cfg) return batch def _copy_mutate_arch(self, parent_cfg: dict) -> dict: """ # ! Time performance is critical because called sequentialy Args: parent_arch (list(int)): embedding of the parent's architecture. Returns: dict: embedding of the mutated architecture of the child. """ hp_x = self._problem.extract_hp_values(parent_cfg) x = hp_x + parent_cfg["arch_seq"] i = self._random_state.choice(self._space_size) hp = self._space.get_hyperparameters()[i] x[i] = hp.sample(self._space.random) child_cfg = self._problem.gen_config(x[self.hp_size :], x[: self.hp_size]) return child_cfg
5,632
decode boolean
def METHOD_NAME(data, key):
    """Decode a boolean Internet Config preference.

    *data* is the raw preference bytes; the boolean is stored in the first
    byte, returned here as an int (0/1). *key* is unused but kept so all
    decoders share one signature.
    """
    return ord(data[0])
def _code_multistr(data, key): numstr = len(data) rv = chr((numstr>>8) & 0xff) + chr(numstr & 0xff) for i in data: rv = rv + _code_default(i) return rv def _code_fontrecord(data, key): size, face, name = data return chr((size>>8) & 0xff) + chr(size & 0xff) + chr(face & 0xff) + \ chr(0) + _code_default(name) def _code_boolean(data, key): print 'XXXX boolean:', repr(data) return chr(data) def _code_text(data, key): return data def _code_charset(data, key): return data[0] + data[1] def _code_appspec(data, key): return data[0] + _code_default(data[1]) _decoder_table = { "ArchieAll" : (_decode_multistr , _code_multistr), "UMichAll" : (_decode_multistr , _code_multistr), "InfoMacAll" : (_decode_multistr , _code_multistr), "ListFont" : (_decode_fontrecord , _code_fontrecord), "ScreenFont" : (_decode_fontrecord , _code_fontrecord), "PrinterFont" : (_decode_fontrecord , _code_fontrecord), # "DownloadFolder" : (_decode_filespec , _code_filespec), "Signature": (_decode_text , _code_text), "Plan" : (_decode_text , _code_text), "MailHeaders" : (_decode_text , _code_text), "NewsHeaders" : (_decode_text , _code_text), # "Mapping" "CharacterSet" : (_decode_charset , _code_charset), "Helper\245" : (_decode_appspec , _code_appspec), # "Services" : (_decode_services, ????), "NewMailFlashIcon" : (METHOD_NAME , _code_boolean), "NewMailDialog" : (METHOD_NAME , _code_boolean), "NewMailPlaySound" : (METHOD_NAME , _code_boolean), # "WebBackgroundColor" : _decode_color, "NoProxyDomains" : (_decode_multistr , _code_multistr), "UseHTTPProxy" : (METHOD_NAME , _code_boolean), "UseGopherProxy": (METHOD_NAME , _code_boolean), "UseFTPProxy" : (METHOD_NAME , _code_boolean), "UsePassiveFTP" : (METHOD_NAME , _code_boolean), } def _decode(data, key): if '\245' in key: key2 = key[:string.index(key, '\245')+1] else: key2 = key if key2 in _decoder_table: decoder = _decoder_table[key2][0] else: decoder = _decode_default return decoder(data, key) def _code(data, key): if type(data) == _ICOpaqueDataType: 
return data.data if '\245' in key: key2 = key[:string.index(key, '\245')+1] else: key2 = key if key2 in _decoder_table: coder = _decoder_table[key2][1] else: coder = _code_default return coder(data, key) class IC: def __init__(self, signature='Pyth', ic=None): if ic: self.ic = ic else: self.ic = icglue.ICStart(signature) if hasattr(self.ic, 'ICFindConfigFile'): self.ic.ICFindConfigFile() self.h = Res.Resource('') def keys(self): rv = [] self.ic.ICBegin(icReadOnlyPerm) num = self.ic.ICCountPref() for i in range(num): rv.append(self.ic.ICGetIndPref(i+1)) self.ic.ICEnd() return rv def has_key(self, key): return self.__contains__(key) def __contains__(self, key): try: dummy = self.ic.ICFindPrefHandle(key, self.h) except icglue.error: return 0 return 1 def __getitem__(self, key): attr = self.ic.ICFindPrefHandle(key, self.h) return _decode(self.h.data, key) def __setitem__(self, key, value): value = _code(value, key) self.ic.ICSetPref(key, ICattr_no_change, value) def launchurl(self, url, hint=""): # Work around a bug in ICLaunchURL: file:/foo does # not work but file:///foo does. 
if url[:6] == 'file:/' and url[6] != '/': url = 'file:///' + url[6:] self.ic.ICLaunchURL(hint, url, 0, len(url)) def parseurl(self, data, start=None, end=None, hint=""): if start is None: selStart = 0 selEnd = len(data) else: selStart = selEnd = start if end is not None: selEnd = end selStart, selEnd = self.ic.ICParseURL(hint, data, selStart, selEnd, self.h) return self.h.data, selStart, selEnd def mapfile(self, file): if type(file) != type(''): file = file.as_tuple()[2] return self.ic.ICMapFilename(file) def maptypecreator(self, type, creator, filename=""): return self.ic.ICMapTypeCreator(type, creator, filename) def settypecreator(self, file): file = Carbon.File.pathname(file) record = self.mapfile(os.path.split(file)[1]) MacOS.SetCreatorAndType(file, record[2], record[1]) macostools.touched(fss) # Convenience routines _dft_ic = None def launchurl(url, hint=""): global _dft_ic if _dft_ic is None: _dft_ic = IC() return _dft_ic.launchurl(url, hint) def parseurl(data, start=None, end=None, hint=""): global _dft_ic if _dft_ic is None: _dft_ic = IC() return _dft_ic.parseurl(data, start, end, hint) def mapfile(filename): global _dft_ic if _dft_ic is None: _dft_ic = IC() return _dft_ic.mapfile(filename) def maptypecreator(type, creator, filename=""): global _dft_ic if _dft_ic is None: _dft_ic = IC() return _dft_ic.maptypecreator(type, creator, filename) def settypecreator(file): global _dft_ic if _dft_ic is None: _dft_ic = IC() return _dft_ic.settypecreator(file) def _test(): ic = IC() for k in ic.keys(): try: v = ic[k] except error: v = '????' print k, '\t', v sys.exit(1) if __name__ == '__main__': _test()
5,633
add xform resource overrides
""" ResourceOverrideHelper ---------------------- This is dead code. It supports a legacy feature, multi-master linked applications. The actual flag has been removed, but a lot of related code still exists. """ from collections import Counter from django.db import models from corehq.apps.app_manager.exceptions import ResourceOverrideError from corehq.apps.app_manager.suite_xml.contributors import PostProcessor from corehq.apps.app_manager.suite_xml.sections.resources import FormResourceContributor from corehq.apps.app_manager.suite_xml.xml_models import XFormResource from corehq.util.quickcache import quickcache from corehq.util.timer import time_method class ResourceOverride(models.Model): domain = models.CharField(max_length=255, null=False) app_id = models.CharField(max_length=255, null=False) # Type of resource, e.g., xform. Populated by the root_name of the relevant suite_xml.xml_models class. root_name = models.CharField(max_length=32, null=False) pre_id = models.CharField(max_length=255, null=False) post_id = models.CharField(max_length=255, null=False) class Meta(object): unique_together = ('domain', 'app_id', 'root_name', 'pre_id') def copy_xform_resource_overrides(domain, app_id, id_map): """ Adds a new set of overrides that's a copy of existing overrides. id_map has keys that are the existing ids and values that are the corresponding ids to add. """ pre_to_post_map = {} for pre_id, override in get_xform_resource_overrides(domain, app_id).items(): # If the app already has an override for a form unique id in the old app... 
if pre_id in id_map.keys(): # ...then add the same override, for the same form in the new app pre_to_post_map[id_map[pre_id]] = override.post_id if pre_to_post_map: return METHOD_NAME(domain, app_id, pre_to_post_map) return [] def METHOD_NAME(domain, app_id, pre_to_post_map): overrides_by_pre_id = get_xform_resource_overrides(domain, app_id) errors = [] new_overrides = [] for pre_id, post_id in pre_to_post_map.items(): if pre_id in overrides_by_pre_id: if post_id != overrides_by_pre_id[pre_id].post_id: errors.append("Attempt to change {} from {} to {}".format( pre_id, overrides_by_pre_id[pre_id].post_id, post_id )) else: new_overrides.append(ResourceOverride( domain=domain, app_id=app_id, root_name=XFormResource.ROOT_NAME, pre_id=pre_id, post_id=post_id, )) if new_overrides and not errors: ResourceOverride.objects.bulk_create(new_overrides) get_xform_resource_overrides.clear(domain, app_id) if errors: raise ResourceOverrideError(""" Cannot update overrides for domain {}, app {}, errors:\n{} """.strip().format(domain, app_id, "\n".join(["\t{}".format(e) for e in errors]))) return new_overrides @quickcache(['domain', 'app_id'], timeout=1 * 60 * 60) def get_xform_resource_overrides(domain, app_id): return { override.pre_id: override for override in ResourceOverride.objects.filter( domain=domain, app_id=app_id, root_name=XFormResource.ROOT_NAME, ) } class ResourceOverrideHelper(PostProcessor): @time_method() def update_suite(self): """ Applies manual overrides of resource ids. 
""" overrides_by_pre_id = get_xform_resource_overrides(self.app.domain, self.app.origin_id) resources = getattr(self.suite, FormResourceContributor.section_name) for resource in resources: if resource.id in overrides_by_pre_id: resource.id = overrides_by_pre_id[resource.id].post_id id_counts = Counter(resource.id for resource in resources) duplicates = [key for key, count in id_counts.items() if count > 1] if duplicates: raise ResourceOverrideError("Duplicate resource ids found: {}".format(", ".join(duplicates)))
5,634
write run
import json
import os
from pathlib import Path
from typing import Optional

from biosimulations_pipeline.datamodels import (
    BiosimulationsProject,
    SourceOmex,
    SimulationRun,
    SimulatorComparison
)


def _get_project_name(omex_file: Path) -> str:
    """Derive a project id from an OMEX filename: everything before the first dot."""
    return str(omex_file.name).split(".")[0]


class DataManager(object):
    """Locates source OMEX archives and persists pipeline records as NDJSON files.

    Paths default to the OMEX_SOURCE_DIR / OMEX_OUTPUT_DIR environment
    variables; explicit constructor arguments take precedence.
    """
    omex_src_dir: Path
    out_dir: Path
    projects_ndjson_file: Path
    runs_ndjson_file: Path

    def __init__(self, omex_src_dir: Optional[Path] = None, out_dir: Optional[Path] = None):
        """Resolve the source and output directories.

        Raises:
            ValueError: if the resolved source directory does not exist.
        """
        self.omex_src_dir = Path(os.environ.get("OMEX_SOURCE_DIR", "OMEX_SOURCE_DIR-not-specified"))
        if omex_src_dir is not None:
            self.omex_src_dir = omex_src_dir
        if not os.path.exists(self.omex_src_dir):
            raise ValueError(f"Base source directory {self.omex_src_dir} does not exist")

        # The output directory is created on demand, unlike the source dir.
        self.out_dir = Path(os.environ.get("OMEX_OUTPUT_DIR", "OMEX_OUTPUT_DIR-not-specified"))
        if out_dir is not None:
            self.out_dir = out_dir
        if not os.path.exists(self.out_dir):
            os.makedirs(self.out_dir)

        self.projects_ndjson_file = self.out_dir / 'biosimulations_projects.ndjson'
        self.runs_ndjson_file = self.out_dir / 'biosimulations_runs.ndjson'
        self.compare_ndjson_file = self.out_dir / 'biosimulations_comparisons.ndjson'

    def _read_ndjson(self, path: Path, cls) -> list:
        """Parse one `cls` instance per line of an NDJSON file; [] if absent."""
        if not os.path.exists(path):
            return []
        with open(path) as f:
            return [cls(**json.loads(line)) for line in f.readlines()]

    def _list_omex_files(self) -> 'list[Path]':
        """All *.omex archives directly under the source directory."""
        return [
            self.omex_src_dir / str(name)
            for name in os.listdir(self.omex_src_dir)
            if str(name).endswith(".omex")
        ]

    def read_run_requests(self) -> 'list[SimulationRun]':
        """Read all recorded simulation runs."""
        return self._read_ndjson(self.runs_ndjson_file, SimulationRun)

    def read_projects(self) -> 'list[BiosimulationsProject]':
        """Read all recorded projects."""
        return self._read_ndjson(self.projects_ndjson_file, BiosimulationsProject)

    def write_project(self, project: BiosimulationsProject) -> None:
        """Append one project record to the projects NDJSON file."""
        with open(self.projects_ndjson_file, 'a') as f:
            f.write(json.dumps(project.dict()) + "\n")

    def get_spec_omex_list(self) -> 'list[Path]':
        """Return the paths of all source OMEX archives."""
        return self._list_omex_files()

    def get_source_omex_archives(self) -> 'list[SourceOmex]':
        """Return a SourceOmex (path + derived project id) per source archive."""
        return [
            SourceOmex(omex_file=omex_file, project_id=_get_project_name(omex_file))
            for omex_file in self._list_omex_files()
        ]

    def METHOD_NAME(self, simulation_run: SimulationRun) -> None:
        """Append one simulation run record to the runs NDJSON file."""
        with open(self.runs_ndjson_file, 'a') as f:
            f.write(json.dumps(simulation_run.dict()) + "\n")

    def write_runs(self, runs: 'list[SimulationRun]'):
        """Overwrite the runs NDJSON file with the given runs."""
        with open(self.runs_ndjson_file, 'wt') as f:
            for run in runs:
                f.write(json.dumps(run.dict()) + "\n")

    def write_comparison(self, simulation_comparison: SimulatorComparison) -> None:
        """Append one simulator-comparison record to the comparisons NDJSON file."""
        with open(self.compare_ndjson_file, 'a') as f:
            f.write(json.dumps(simulation_comparison.dict()) + "\n")

    def read_comparisons(self) -> 'list[SimulatorComparison]':
        """Read all recorded simulator comparisons."""
        return self._read_ndjson(self.compare_ndjson_file, SimulatorComparison)

    def get_run_output_dir(self, simulation_run: SimulationRun) -> Path:
        """Return (creating if needed) <out>/<project>/<simulator>/<version>."""
        run_out_dir = self.out_dir / simulation_run.project_id / simulation_run.simulator.value / simulation_run.simulator_version
        if not os.path.exists(run_out_dir):
            os.makedirs(run_out_dir)
        return run_out_dir
5,635
test applies correctly for sentry apps
"""Tests for the Sentry-App alert-rule action (NotifyEventSentryAppAction)."""
from unittest.mock import patch

import pytest
from rest_framework import serializers

from sentry.rules.actions.sentry_apps import NotifyEventSentryAppAction
from sentry.silo import SiloMode
from sentry.tasks.sentry_apps import notify_sentry_app
from sentry.testutils.cases import RuleTestCase
from sentry.testutils.silo import assume_test_silo_mode, region_silo_test

ValidationError = serializers.ValidationError

# Registry id the action is expected to expose via rule.id.
SENTRY_APP_ALERT_ACTION = "sentry.rules.actions.notify_event_sentry_app.NotifyEventSentryAppAction"


@region_silo_test(stable=True)
class NotifyEventSentryAppActionTest(RuleTestCase):
    rule_cls = NotifyEventSentryAppAction
    # Settings payload matching the fields in the alert-rule-action schema.
    schema_data = [
        {"name": "title", "value": "Squid Game"},
        {"name": "summary", "value": "circle triangle square"},
    ]

    @pytest.fixture(autouse=True)
    def create_schema(self):
        # Every test gets an app schema containing one alert-rule-action element.
        self.schema = {"elements": [self.create_alert_rule_action_schema()]}

    def METHOD_NAME(self):
        """Firing the rule should queue exactly one notify_sentry_app future
        carrying the app and the schema-defined settings."""
        event = self.get_event()
        self.app = self.create_sentry_app(
            organization=event.organization,
            name="Test Application",
            is_alertable=True,
            schema=self.schema,
        )
        self.install = self.create_sentry_app_installation(
            slug="test-application", organization=event.organization
        )

        rule = self.get_rule(
            data={
                "sentryAppInstallationUuid": self.install.uuid,
                "settings": self.schema_data,
            }
        )

        assert rule.id == SENTRY_APP_ALERT_ACTION

        futures = list(rule.after(event=event, state=self.get_state()))
        assert len(futures) == 1
        assert futures[0].callback is notify_sentry_app
        assert futures[0].kwargs["sentry_app"].id == self.app.id
        assert futures[0].kwargs["schema_defined_settings"] == self.schema_data

    @patch("sentry.sentry_apps.SentryAppComponentPreparer.run")
    def test_sentry_app_actions(self, mock_sentry_app_component_preparer):
        """get_custom_actions should surface one enabled action built from the
        app's alert-rule-action schema element."""
        event = self.get_event()
        self.project = self.create_project(organization=event.organization)
        self.app = self.create_sentry_app(
            organization=event.organization,
            name="Test Application",
            is_alertable=True,
            schema=self.schema,
        )
        self.install = self.create_sentry_app_installation(
            slug="test-application", organization=event.organization
        )
        rule = self.get_rule(
            data={
                "sentryAppInstallationUuid": self.install.uuid,
                "settings": self.schema_data,
            }
        )
        action_list = rule.get_custom_actions(self.project)
        assert len(action_list) == 1

        action = action_list[0]
        alert_element = self.schema["elements"][0]
        assert action["id"] == SENTRY_APP_ALERT_ACTION
        assert action["service"] == self.app.slug
        assert action["prompt"] == self.app.name
        assert action["actionType"] == "sentryapp"
        assert action["enabled"]
        assert action["formFields"] == alert_element["settings"]
        assert alert_element["title"] in action["label"]

    def test_self_validate(self):
        """self_validate must reject every malformed configuration variant."""
        self.organization = self.create_organization()
        self.app = self.create_sentry_app(
            organization=self.organization,
            name="Test Application",
            is_alertable=True,
            schema=self.schema,
        )
        self.install = self.create_sentry_app_installation(
            slug="test-application", organization=self.organization
        )

        # Test no Sentry App Installation uuid
        rule = self.get_rule(data={"hasSchemaFormConfig": True})
        with pytest.raises(ValidationError):
            rule.self_validate()

        # Test invalid Sentry App Installation uuid
        rule = self.get_rule(
            data={"hasSchemaFormConfig": True, "sentryAppInstallationUuid": "not_a_real_uuid"}
        )
        with pytest.raises(ValidationError):
            rule.self_validate()

        # Test deleted Sentry App Installation uuid
        test_install = self.create_sentry_app_installation(
            organization=self.organization, slug="test-application"
        )
        with assume_test_silo_mode(SiloMode.CONTROL):
            test_install.delete()
        rule = self.get_rule(
            data={"hasSchemaFormConfig": True, "sentryAppInstallationUuid": test_install.uuid}
        )
        with pytest.raises(ValidationError):
            rule.self_validate()

        # Test Sentry Apps without alert rules configured in their schema
        self.create_sentry_app(organization=self.organization, name="No Alert Rule Action")
        test_install = self.create_sentry_app_installation(
            organization=self.organization, slug="no-alert-rule-action"
        )
        rule = self.get_rule(
            data={"hasSchemaFormConfig": True, "sentryAppInstallationUuid": test_install.uuid}
        )
        with pytest.raises(ValidationError):
            rule.self_validate()

        # Test without providing settings in rule data
        rule = self.get_rule(
            data={"hasSchemaFormConfig": True, "sentryAppInstallationUuid": self.install.uuid}
        )
        with pytest.raises(ValidationError):
            rule.self_validate()

        # Test without providing required field values
        rule = self.get_rule(
            data={
                "hasSchemaFormConfig": True,
                "sentryAppInstallationUuid": self.install.uuid,
                "settings": [{"name": "title", "value": "Lamy"}],
            }
        )
        with pytest.raises(ValidationError):
            rule.self_validate()

        # Test with additional fields not on the app's schema
        rule = self.get_rule(
            data={
                "hasSchemaFormConfig": True,
                "sentryAppInstallationUuid": self.install.uuid,
                "settings": [
                    {"name": "title", "value": "Lamy"},
                    {"name": "summary", "value": "Safari"},
                    {"name": "invalidField", "value": "Invalid Value"},
                ],
            }
        )
        with pytest.raises(ValidationError):
            rule.self_validate()

        # Test with invalid value on Select field
        rule = self.get_rule(
            data={
                "hasSchemaFormConfig": True,
                "sentryAppInstallationUuid": self.install.uuid,
                "settings": [
                    {"name": "title", "value": "Lamy"},
                    {"name": "summary", "value": "Safari"},
                    {"name": "points", "value": "Invalid Select Value"},
                ],
            }
        )
        with pytest.raises(ValidationError):
            rule.self_validate()

    def test_render_label(self):
        """The rule's label should be rendered from the schema element's title."""
        event = self.get_event()
        self.app = self.create_sentry_app(
            organization=event.organization,
            name="Test Application",
            is_alertable=True,
            schema=self.schema,
        )
        self.install = self.create_sentry_app_installation(
            slug="test-application", organization=event.organization
        )

        rule = self.get_rule(
            data={
                "sentryAppInstallationUuid": self.install.uuid,
                "settings": self.schema_data,
            }
        )

        assert rule.render_label() == "Create Task with App"
5,636
model
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
# Unit tests for the SocContactResistor submodel: construction with and
# without externally supplied variables, and initialization with ipopt.
__author__ = "Douglas Allan"

import pytest
import numpy as np
import pyomo.environ as pyo

from idaes.core import FlowsheetBlock
from idaes.core.util.model_statistics import degrees_of_freedom
import idaes.models_extra.power_generation.unit_models.soc_submodels as soc
import idaes.models_extra.power_generation.unit_models.soc_submodels.testing as soc_testing
from idaes.core.solvers import get_solver

# Module-level solver handle; tests needing it are skipped when unavailable.
solver = get_solver("ipopt")


@pytest.fixture
def METHOD_NAME():
    # Steady-state cell flowsheet with externally created temperature and
    # heat-flux variables passed into the contact resistor.
    time_set = [0]
    zfaces = np.linspace(0, 1, 4).tolist()
    m = soc_testing._cell_flowsheet_model(
        dynamic=False, time_set=time_set, zfaces=zfaces
    )
    iznodes = m.fs.iznodes
    tset = m.fs.config.time
    m.fs.temperature_deviation_x = pyo.Var(
        tset, iznodes, initialize=0, units=pyo.units.K
    )
    m.fs.heat_flux_x0 = pyo.Var(
        tset, iznodes, initialize=0, units=pyo.units.W / pyo.units.m**2
    )

    m.fs.contact = soc.SocContactResistor(
        control_volume_zfaces=zfaces,
        length_z=m.fs.length_z,
        length_y=m.fs.length_y,
        current_density=m.fs.current_density,
        temperature_z=m.fs.temperature_z,
        temperature_deviation_x=m.fs.temperature_deviation_x,
        heat_flux_x0=m.fs.heat_flux_x0,
    )
    # Fix all inputs so the submodel has zero degrees of freedom.
    m.fs.temperature_deviation_x.fix(0)
    m.fs.heat_flux_x0.fix(0)
    m.fs.contact.log_preexponential_factor.fix(pyo.log(0.46e-4))
    m.fs.contact.thermal_exponent_dividend.fix(0)
    m.fs.contact.contact_fraction.fix(1)

    return m


@pytest.fixture
def model2():
    # Standalone contact resistor that creates its own variables internally.
    time_set = [0, 1]
    zfaces = np.linspace(0, 1, 8).tolist()
    m = pyo.ConcreteModel()
    m.fs = FlowsheetBlock(dynamic=False, time_set=time_set, time_units=pyo.units.s)
    m.fs.contact = soc.SocContactResistor(control_volume_zfaces=zfaces)
    m.fs.contact.current_density.fix(0)
    m.fs.contact.temperature_z.fix(0)
    m.fs.contact.temperature_deviation_x.fix(0)
    m.fs.contact.heat_flux_x0.fix(0)
    m.fs.contact.log_preexponential_factor.fix(pyo.log(0.46e-4))
    m.fs.contact.thermal_exponent_dividend.fix(0)
    m.fs.contact.contact_fraction.fix(1)

    return m


@pytest.mark.build
@pytest.mark.unit
def test_build(METHOD_NAME):
    # Verify component counts and that external variables are References.
    contact = METHOD_NAME.fs.contact
    nz = len(contact.iznodes)
    nt = len(METHOD_NAME.fs.time)
    soc_testing._build_test_utility(
        contact,
        comp_dict={
            pyo.Var: {
                "temperature_z": nz * nt,
                "temperature_deviation_x": nz * nt,
                "heat_flux_x0": nz * nt,
                "current_density": nz * nt,
                "length_z": 1,
                "length_y": 1,
                "heat_flux_x1": nz * nt,
                "log_preexponential_factor": 1,
                "thermal_exponent_dividend": 1,
                "contact_fraction": 1,
            },
            pyo.Constraint: {"heat_flux_x_eqn": nz * nt},
            pyo.Expression: {
                "temperature": nz * nt,
                "contact_resistance": nz * nt,
                "voltage_drop_total": nz * nt,
                "joule_heating_flux": nz * nt,
            },
        },
        references=[
            "temperature_z",
            "temperature_deviation_x",
            "heat_flux_x0",
            "current_density",
            "length_z",
            "length_y",
        ],
    )
    assert degrees_of_freedom(METHOD_NAME.fs.contact) == 0


@pytest.mark.build
@pytest.mark.unit
def test_build2(model2):
    # Same component counts, but no References (variables created locally).
    contact = model2.fs.contact
    nz = len(contact.iznodes)
    nt = len(model2.fs.time)
    soc_testing._build_test_utility(
        contact,
        comp_dict={
            pyo.Var: {
                "temperature_z": nz * nt,
                "temperature_deviation_x": nz * nt,
                "heat_flux_x0": nz * nt,
                "current_density": nz * nt,
                "length_z": 1,
                "length_y": 1,
                "heat_flux_x1": nz * nt,
                "log_preexponential_factor": 1,
                "thermal_exponent_dividend": 1,
                "contact_fraction": 1,
            },
            pyo.Constraint: {"heat_flux_x_eqn": nz * nt},
            pyo.Expression: {
                "temperature": nz * nt,
                "contact_resistance": nz * nt,
                "voltage_drop_total": nz * nt,
                "joule_heating_flux": nz * nt,
            },
        },
    )
    assert degrees_of_freedom(model2.fs.contact) == 0


@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_initialization(METHOD_NAME):
    # Initialize twice: once fixing heat_flux_x0, once fixing heat_flux_x1.
    METHOD_NAME.fs.contact.initialize_build(optarg={"nlp_scaling_method": "user-scaling"})

    METHOD_NAME.fs.heat_flux_x0.unfix()
    METHOD_NAME.fs.contact.heat_flux_x1.fix()

    METHOD_NAME.fs.contact.initialize_build(
        fix_heat_flux_x0=False, optarg={"nlp_scaling_method": "user-scaling"}
    )
5,637
test check if key in list of
"""Parametrized unit tests for chemistry_and_robots.kg_operations.dict_and_list."""
import pytest

from chemistry_and_robots.kg_operations import dict_and_list as dal


@pytest.mark.parametrize(
    "list_of_dict,key,value,expected_list_of_dict",
    [
        # Empty input, single match, match on differing dicts, and multi-match.
        ([], "key", "value", []),
        ([{"key": "value", "key2": "value"}, {"key": "value_", "key2": "value_"}],
         "key", "value", [{"key": "value", "key2": "value"}]),
        ([{"key": "value", "key2": "value_"}, {"key": "value_", "key2": "value_"}],
         "key", "value", [{"key": "value", "key2": "value_"}]),
        ([{"key": "value", "key2": "value"}, {"key": "value_", "key2": "value_"}],
         "key", "value_", [{"key": "value_", "key2": "value_"}]),
        ([{"key": "value", "key2": "value"}, {"key": "value", "key2": "value_"}],
         "key", "value", [{"key": "value", "key2": "value"}, {"key": "value", "key2": "value_"}]),
    ],
)
def test_get_sublist_in_list_of_dict_matching_key_value(list_of_dict, key, value, expected_list_of_dict):
    # Filters the list down to dicts where d[key] == value.
    assert dal.get_sublist_in_list_of_dict_matching_key_value(list_of_dict, key, value) == expected_list_of_dict


@pytest.mark.parametrize(
    "list_of_dict,key,expected_list_of_value",
    [
        ([], "key", []),
        ([{"key": "value", "key2": "value"}, {"key": "value_", "key2": "value_"}],
         "key", ["value", "value_"]),
    ],
)
def test_get_value_from_list_of_dict(list_of_dict, key, expected_list_of_value):
    # Order is not guaranteed, so compare sorted values.
    assert sorted(dal.get_value_from_list_of_dict(list_of_dict, key)) == sorted(expected_list_of_value)


@pytest.mark.parametrize(
    "list_of_dict,key,expected_list_of_value",
    [
        ([], "key", []),
        ([{"key": "value", "key2": "value"}, {"key": "value_", "key2": "value_"}],
         "key", ["value", "value_"]),
        # Duplicate values collapse to one entry.
        ([{"key": "value", "key2": "value"}, {"key": "value", "key2": "value_"}],
         "key", ["value"]),
    ],
)
def test_get_unique_values_in_list_of_dict(list_of_dict, key, expected_list_of_value):
    assert sorted(dal.get_unique_values_in_list_of_dict(list_of_dict, key)) == sorted(expected_list_of_value)


@pytest.mark.parametrize(
    "list_of_dict,wanted_keys,expected_list_of_dict",
    [
        ([], [], []),
        ([{"key": "value", "key2": "value"}], ["key"], [{"key": "value"}]),
        ([{"key": "value", "key2": "value"}], ["key2"], [{"key2": "value"}]),
        ([{"key": "value", "key2": "value"}], ["key", "key2"], [{"key": "value", "key2": "value"}]),
        ([{"key": "value", "key2": "value"}, {"key": "value_", "key2": "value_"}],
         ["key"], [{"key": "value"}, {"key": "value_"}]),
    ],
)
def test_keep_wanted_keys_from_list_of_dict(list_of_dict, wanted_keys, expected_list_of_dict):
    # Only the wanted keys survive in each dict.
    assert dal.keep_wanted_keys_from_list_of_dict(list_of_dict, wanted_keys) == expected_list_of_dict


@pytest.mark.parametrize(
    "list_of_dict,unwanted_keys,expected_list_of_dict",
    [
        ([], [], []),
        ([{"key": "value", "key2": "value"}], ["key"], [{"key2": "value"}]),
        ([{"key": "value", "key2": "value"}], ["key2"], [{"key": "value"}]),
        # NOTE(review): removing every key yields [] (not [{}]) per the
        # implementation under test.
        ([{"key": "value", "key2": "value"}], ["key", "key2"], []),
        ([{"key": "value", "key2": "value"}, {"key": "value_", "key2": "value_"}],
         ["key"], [{"key2": "value"}, {"key2": "value_"}]),
    ],
)
def test_remove_unwanted_keys_from_list_of_dict(list_of_dict, unwanted_keys, expected_list_of_dict):
    assert dal.remove_unwanted_keys_from_list_of_dict(list_of_dict, unwanted_keys) == expected_list_of_dict


@pytest.mark.parametrize(
    "list_of_dict,expected_list_of_dict",
    [
        ([], []),
        ([{"key": "value"}, {"key": "value"}], [{"key": "value"}]),
    ],
)
def test_remove_duplicate_dict_from_list_of_dict(list_of_dict, expected_list_of_dict):
    assert dal.remove_duplicate_dict_from_list_of_dict(list_of_dict) == expected_list_of_dict


@pytest.mark.parametrize(
    "list_of_dict,key,expected_result",
    [
        ([], "key", False),
        ([{"key": "value"}], "key", True),
        ([{"key": "value"}], "key2", False),
        # True if ANY dict in the list carries the key.
        ([{"key": "value"}, {"key2": "value2"}], "key", True),
        ([{"key": "value"}, {"key2": "value2"}], "key3", False),
    ],
)
def METHOD_NAME(list_of_dict, key, expected_result):
    assert dal.check_if_key_in_list_of_dict(list_of_dict, key) == expected_result


@pytest.mark.parametrize(
    "list_a,list_b,expected_result",
    [
        ([], [], True),
        (None, None, True),
        # None is only "equal" to None, never to an empty list.
        (None, [], False),
        ([], None, False),
        ([1, 2, 3], [1, 2, 3], True),
        ([1, 2, 3], [1, 2, 3, 4], False),
        # Comparison is order-insensitive.
        ([1, 2, 3], [3, 2, 1], True),
        (["1", "2", "3"], ["1", "2", "3"], True),
        (["1", "2", "3"], ["3", "2", "1"], True),
    ],
)
def test_check_if_two_lists_equal(list_a, list_b, expected_result):
    assert dal.check_if_two_lists_equal(list_a, list_b) == expected_result
5,638
put container
""" Swift utility class =================== Author: Anthony Stanton <anthony.stanton@gmail.com> """ import logging import sys from errno import EEXIST from os import makedirs from os.path import dirname, isdir import salt.utils.files # Get logging started log = logging.getLogger(__name__) HAS_SWIFT = False try: from swiftclient import client HAS_SWIFT = True except ImportError: pass def check_swift(): return HAS_SWIFT def mkdirs(path): try: makedirs(path) except OSError as err: if err.errno != EEXIST: raise # we've been playing fast and loose with kwargs, but the swiftclient isn't # going to accept any old thing def _sanitize(kwargs): variables = ( "user", "key", "authurl", "retries", "preauthurl", "preauthtoken", "snet", "starting_backoff", "max_backoff", "tenant_name", "os_options", "auth_version", "cacert", "insecure", "ssl_compression", ) ret = {} for var in kwargs: if var in variables: ret[var] = kwargs[var] return ret class SaltSwift: """ Class for all swiftclient functions """ def __init__( self, user, tenant_name, auth_url, password=None, auth_version=2, **kwargs ): """ Set up openstack credentials """ if not HAS_SWIFT: log.error( "Error:: unable to find swiftclient. Try installing it from the" " appropriate repository." 
) return None self.kwargs = kwargs.copy() self.kwargs["user"] = user self.kwargs["password"] = password self.kwargs["tenant_name"] = tenant_name self.kwargs["authurl"] = auth_url self.kwargs["auth_version"] = auth_version if "key" not in self.kwargs: self.kwargs["key"] = password self.kwargs = _sanitize(self.kwargs) self.conn = client.Connection(**self.kwargs) def get_account(self): """ List Swift containers """ try: listing = self.conn.get_account() return listing except Exception as exc: # pylint: disable=broad-except log.error("There was an error::") if hasattr(exc, "code") and hasattr(exc, "msg"): log.error(" Code: %s: %s", exc.code, exc.msg) log.error(" Content: \n%s", getattr(exc, "read", lambda: str(exc))()) return False def get_container(self, cont): """ List files in a Swift container """ try: listing = self.conn.get_container(cont) return listing except Exception as exc: # pylint: disable=broad-except log.error("There was an error::") if hasattr(exc, "code") and hasattr(exc, "msg"): log.error(" Code: %s: %s", exc.code, exc.msg) log.error(" Content: \n%s", getattr(exc, "read", lambda: str(exc))()) return False def METHOD_NAME(self, cont): """ Create a new Swift container """ try: self.conn.METHOD_NAME(cont) return True except Exception as exc: # pylint: disable=broad-except log.error("There was an error::") if hasattr(exc, "code") and hasattr(exc, "msg"): log.error(" Code: %s: %s", exc.code, exc.msg) log.error(" Content: \n%s", getattr(exc, "read", lambda: str(exc))()) return False def delete_container(self, cont): """ Delete a Swift container """ try: self.conn.delete_container(cont) return True except Exception as exc: # pylint: disable=broad-except log.error("There was an error::") if hasattr(exc, "code") and hasattr(exc, "msg"): log.error(" Code: %s: %s", exc.code, exc.msg) log.error(" Content: \n%s", getattr(exc, "read", lambda: str(exc))()) return False def post_container(self, cont, metadata=None): """ Update container metadata """ def 
head_container(self, cont): """ Get container metadata """ def get_object(self, cont, obj, local_file=None, return_bin=False): """ Retrieve a file from Swift """ try: if local_file is None and return_bin is False: return False headers, body = self.conn.get_object(cont, obj, resp_chunk_size=65536) if return_bin is True: fp = sys.stdout else: dirpath = dirname(local_file) if dirpath and not isdir(dirpath): mkdirs(dirpath) # pylint: disable=resource-leakage fp = salt.utils.files.fopen(local_file, "wb") # pylint: enable=resource-leakage read_length = 0 for chunk in body: read_length += len(chunk) fp.write(chunk) fp.close() return True # ClientException # file/dir exceptions except Exception as exc: # pylint: disable=broad-except log.error("There was an error::") if hasattr(exc, "code") and hasattr(exc, "msg"): log.error(" Code: %s: %s", exc.code, exc.msg) log.error(" Content: \n%s", getattr(exc, "read", lambda: str(exc))()) return False def put_object(self, cont, obj, local_file): """ Upload a file to Swift """ try: with salt.utils.files.fopen(local_file, "rb") as fp_: self.conn.put_object(cont, obj, fp_) return True except Exception as exc: # pylint: disable=broad-except log.error("There was an error::") if hasattr(exc, "code") and hasattr(exc, "msg"): log.error(" Code: %s: %s", exc.code, exc.msg) log.error(" Content: \n%s", getattr(exc, "read", lambda: str(exc))()) return False def delete_object(self, cont, obj): """ Delete a file from Swift """ try: self.conn.delete_object(cont, obj) return True except Exception as exc: # pylint: disable=broad-except log.error("There was an error::") if hasattr(exc, "code") and hasattr(exc, "msg"): log.error(" Code: %s: %s", exc.code, exc.msg) log.error(" Content: \n%s", getattr(exc, "read", lambda: str(exc))()) return False def head_object(self, cont, obj): """ Get object metadata """ def post_object(self, cont, obj, metadata): """ Update object metadata """
5,639
get file sys for file
import builtins
import os


class AntFileSystem(object):
    """Abstract interface for a pluggable (non-local) file system backend.

    Concrete backends implement the file operations below and may be used
    as context managers; ``close``/``__exit__`` are no-ops by default.
    """

    def __init__(self, uri):
        raise NotImplementedError

    def exists(self, filename):
        raise NotImplementedError

    def remove(self, filename):
        raise NotImplementedError

    def stat(self, filename):
        raise NotImplementedError

    def list_dir(self, dirname):
        raise NotImplementedError

    def makedirs(self, dirname):
        raise NotImplementedError

    def rename(self, oldname, newname, overwrite=False):
        raise NotImplementedError

    def remove_dir(self, dirname):
        raise NotImplementedError

    def create_dir(self, dirname):
        raise NotImplementedError

    def open(self, filename, mode):
        raise NotImplementedError

    def close(self):
        pass

    def __enter__(self):
        return self

    def __exit__(self, type=None, value=None, trace=None):
        pass


# Saved references to the builtin/os primitives so the proxy can fall back
# to the local file system for non-pangu paths.
source_open = builtins.open
source_stat = os.stat
source_listdir = os.listdir
source_mkdir = os.mkdir
source_exists = os.path.exists
source_remove = os.remove
source_rename = os.rename
source_rmdir = os.rmdir
source_makedirs = os.makedirs
source_isdir = os.path.isdir


def singleton(constructor):
    """Decorator: lazily construct one shared instance of *constructor*."""
    env = [None]

    def wrap(*args, **kwargs):
        if env[0] is None:
            env[0] = constructor(*args, **kwargs)
        return env[0]

    return wrap


PANGU_SCHEMA = "pangu://"


def is_pangu_path(path):
    """Return True when *path* is a string with the ``pangu://`` schema."""
    return isinstance(path, str) and path.startswith(PANGU_SCHEMA)


@singleton
class FileSystemProxy(object):
    """Singleton dispatcher between the local file system and registered
    schema-specific :class:`AntFileSystem` backends.

    Paths that do not carry a registered schema (e.g. ``pangu://``) are
    forwarded to the corresponding ``os``/``builtins`` functions.
    """

    def __init__(self):
        # Maps schema string (e.g. "pangu") -> AntFileSystem subclass.
        self._fs_registry = {}

    def METHOD_NAME(self, filename):
        """Instantiate the registered backend for *filename*'s schema.

        Raises KeyError when no backend is registered for the schema.
        """
        # partition() is robust against additional "://" occurrences later in
        # the path, unlike the previous ``filename.split("://")`` unpacking,
        # which raised ValueError for such paths.
        schema, _, _ = filename.partition("://")
        return self._fs_registry[schema](filename)

    def regist_file_system(self, schema, file_sys):
        """Register *file_sys* (an AntFileSystem subclass) for *schema*."""
        if not issubclass(file_sys, AntFileSystem):
            raise TypeError(
                "File sys %s must be sub class of %s"
                % (file_sys.__name__, AntFileSystem.__name__)
            )
        self._fs_registry[schema] = file_sys

    def exists(self, filename):
        if not isinstance(filename, str) or not is_pangu_path(filename):
            return source_exists(filename)
        with self.METHOD_NAME(filename) as fs:
            return fs.exists(filename)

    def remove(self, filename, *args, dir_fd=None):
        if not is_pangu_path(filename):
            # FIX: forward dir_fd (it was silently dropped before).
            return source_remove(filename, dir_fd=dir_fd)
        with self.METHOD_NAME(filename) as fs:
            return fs.remove(filename)

    def listdir(self, dirname="."):
        if not isinstance(dirname, str) or not is_pangu_path(dirname):
            return source_listdir(dirname)
        with self.METHOD_NAME(dirname) as fs:
            return fs.list_dir(dirname)

    def makedirs(self, dirname, mode=0o777, exist_ok=False):
        if not is_pangu_path(dirname):
            return source_makedirs(dirname, mode, exist_ok=exist_ok)
        with self.METHOD_NAME(dirname) as fs:
            # NOTE: mode/exist_ok are not forwarded to custom backends.
            return fs.makedirs(dirname)

    def rename(self, oldname, newname, *args, src_dir_fd=None, dst_dir_fd=None):
        if not is_pangu_path(oldname):
            return source_rename(
                oldname, newname, src_dir_fd=src_dir_fd, dst_dir_fd=dst_dir_fd
            )
        # BUG FIX: the previous code compared two freshly constructed backend
        # *instances* with ``!=`` (default identity comparison), which was
        # always true, so every pangu rename raised NotImplementedError.
        # Compare the path schemas instead, and only delegate when both
        # paths live on the same backend.
        if (
            not is_pangu_path(newname)
            or oldname.partition("://")[0] != newname.partition("://")[0]
        ):
            raise NotImplementedError(
                "Renaming from %s to %s not implemented" % (oldname, newname)
            )
        with self.METHOD_NAME(oldname) as src_fs:
            return src_fs.rename(oldname, newname)

    def stat(self, path, *, dir_fd=None, follow_symlinks=True):
        if not isinstance(path, str) or not is_pangu_path(path):
            return source_stat(path)
        with self.METHOD_NAME(path) as fs:
            return fs.stat(path)

    def rmdir(self, dirname, *args, dir_fd=None):
        if not is_pangu_path(dirname):
            return source_rmdir(dirname, dir_fd=dir_fd)
        with self.METHOD_NAME(dirname) as fs:
            return fs.remove_dir(dirname)

    def mkdir(self, dirname, mode=0o777, *, dir_fd=None):
        if not is_pangu_path(dirname):
            return source_mkdir(dirname)
        with self.METHOD_NAME(dirname) as fs:
            return fs.create_dir(dirname)

    def open(
        self,
        filename,
        mode="r",
        buffering=-1,
        encoding=None,
        errors=None,
        newline=None,
        closefd=True,
        opener=None,
    ):
        # File descriptors (ints) and local paths go to builtins.open.
        if isinstance(filename, int) or not is_pangu_path(filename):
            return source_open(
                filename,
                mode,
                buffering,
                encoding,
                errors,
                newline,
                closefd,
                opener,
            )
        fs = self.METHOD_NAME(filename)
        return fs.open(filename, mode)

    def isdir(self, s):
        if not is_pangu_path(s):
            return source_isdir(s)
        fs = self.METHOD_NAME(s)
        # NOTE(review): ``_is_dir`` is not part of the AntFileSystem
        # interface above — presumably provided by concrete backends; verify.
        return fs._is_dir(s)
5,640
generator factory
# Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""
This script is used for checking the correctness of statistics reported
by the gem5 simulator. It can exercise certain components in the memory
subsystem. The reported values could be used to compare against a
validated set of statistics.
""" import m5 import argparse import importlib from pathlib import Path from m5.objects import Root, MemorySize from m5.stats.gem5stats import get_simstat from gem5.components.boards.test_board import TestBoard def METHOD_NAME( generator_class: str, generator_cores: int, mem_size: MemorySize ): if generator_class == "LinearGenerator": from gem5.components.processors.linear_generator import LinearGenerator return LinearGenerator( duration="250us", rate="40GB/s", num_cores=generator_cores, max_addr=mem_size, ) elif generator_class == "RandomGenerator": from gem5.components.processors.random_generator import RandomGenerator return RandomGenerator( duration="250us", rate="40GB/s", num_cores=generator_cores, max_addr=mem_size, ) elif generator_class == "GUPSGenerator": if generator_cores != 1: raise ValueError( "Only one core should be used with GUPSGenerator. " "In order to use multiple cores of GUPS generator, use either " "GUPSGeneratorEP or GUPSGeneratorPAR." ) from gem5.components.processors.gups_generator import GUPSGenerator table_size = f"{int(mem_size / 2)}B" return GUPSGenerator(0, table_size, update_limit=1000, clk_freq="2GHz") elif generator_class == "GUPSGeneratorEP": from gem5.components.processors.gups_generator_ep import ( GUPSGeneratorEP, ) table_size = f"{int(mem_size / 2)}B" return GUPSGeneratorEP( generator_cores, 0, table_size, update_limit=1000, clk_freq="2GHz" ) elif generator_class == "GUPSGeneratorPAR": from gem5.components.processors.gups_generator_par import ( GUPSGeneratorPAR, ) table_size = f"{int(mem_size / 2)}B" return GUPSGeneratorPAR( generator_cores, 0, table_size, update_limit=1000, clk_freq="2GHz" ) else: raise ValueError(f"Unknown generator class {generator_class}") def cache_factory(cache_class: str): if cache_class == "NoCache": from gem5.components.cachehierarchies.classic.no_cache import NoCache return NoCache() elif cache_class == "PrivateL1": from gem5.components.cachehierarchies.classic.private_l1_cache_hierarchy import ( 
PrivateL1CacheHierarchy, ) return PrivateL1CacheHierarchy(l1d_size="32KiB", l1i_size="32KiB") elif cache_class == "PrivateL1PrivateL2": from gem5.components.cachehierarchies.classic.private_l1_private_l2_cache_hierarchy import ( PrivateL1PrivateL2CacheHierarchy, ) return PrivateL1PrivateL2CacheHierarchy( l1d_size="32KiB", l1i_size="32KiB", l2_size="256KiB" ) elif cache_class == "MESITwoLevel": from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import ( MESITwoLevelCacheHierarchy, ) return MESITwoLevelCacheHierarchy( l1i_size="32KiB", l1i_assoc="8", l1d_size="32KiB", l1d_assoc="8", l2_size="256KiB", l2_assoc="4", num_l2_banks=1, ) else: raise ValueError(f"The cache class {cache_class} is not supported.") parser = argparse.ArgumentParser( description="A traffic generator that can be used to test a gem5 " "memory component." ) parser.add_argument( "generator_class", type=str, help="The class of generator to use.", choices=[ "LinearGenerator", "RandomGenerator", "GUPSGenerator", "GUPSGeneratorEP", "GUPSGeneratorPAR", ], ) parser.add_argument( "generator_cores", type=int, help="The number of generator cores to use." ) parser.add_argument( "cache_class", type=str, help="The cache class to import and instantiate.", choices=["NoCache", "PrivateL1", "PrivateL1PrivateL2", "MESITwoLevel"], ) parser.add_argument( "mem_module", type=str, help="The python module to import for memory." ) parser.add_argument( "mem_class", type=str, help="The memory class to import and instantiate." ) parser.add_argument( "mem_args", nargs="*", help="The arguments needed to instantiate the memory class.", ) args = parser.parse_args() cache_hierarchy = cache_factory(args.cache_class) memory_class = getattr( importlib.import_module(args.mem_module), args.mem_class ) memory = memory_class(*args.mem_args) generator = METHOD_NAME( args.generator_class, args.generator_cores, memory.get_size() ) # We use the Test Board. 
This is a special board to run traffic generation # tasks motherboard = TestBoard( clk_freq="3GHz", generator=generator, memory=memory, cache_hierarchy=cache_hierarchy, ) root = Root(full_system=False, system=motherboard) motherboard._pre_instantiate() m5.instantiate() generator.start_traffic() print("Beginning simulation!") exit_event = m5.simulate() print(f"Exiting @ tick {m5.curTick()} because {exit_event.getCause()}.") simstats = get_simstat( [core.generator for core in generator.get_cores()], prepare_stats=True ) json_output = Path(m5.options.outdir) / "output.json" with open(json_output, "w") as stats_file: simstats.dump(stats_file, indent=2)
5,641
test unique identifier
# Unit tests for dagster partition definitions: construction/validation,
# string rendering, key ranges, serializable unique identifiers, subsets,
# and dynamic-partition restrictions.
import re
from typing import Sequence

import pytest

from dagster import (
    DagsterInvalidDefinitionError,
    DagsterInvalidInvocationError,
    DailyPartitionsDefinition,
    DynamicPartitionsDefinition,
    HourlyPartitionsDefinition,
    MultiPartitionsDefinition,
    PartitionKeyRange,
    StaticPartitionsDefinition,
    define_asset_job,
    job,
)
from dagster._check import CheckError
from dagster._core.test_utils import instance_for_test


@pytest.mark.parametrize(
    argnames=["partition_keys"],
    argvalues=[(["a_partition"],), ([str(x) for x in range(10)],)],
)
def test_static_partitions(partition_keys: Sequence[str]):
    # Keys round-trip unchanged through a StaticPartitionsDefinition.
    static_partitions = StaticPartitionsDefinition(partition_keys)

    assert static_partitions.get_partition_keys() == partition_keys


def test_invalid_partition_key():
    # "..." is a reserved/invalid sequence in partition keys.
    with pytest.raises(DagsterInvalidDefinitionError, match="'...'"):
        StaticPartitionsDefinition(["foo", "foo...bar"])


def test_duplicate_partition_key():
    with pytest.raises(
        DagsterInvalidDefinitionError,
        match=re.escape("Duplicate instances of partition keys: ['foo']"),
    ):
        StaticPartitionsDefinition(["foo", "bar", "foo"])


def test_partitions_def_to_string():
    # NOTE(review): start_date says "Tue Jan 11" but the expected rendering
    # says "Thu Jan 11" — presumably the definition reformats the weekday;
    # confirm against HourlyPartitionsDefinition behavior.
    hourly = HourlyPartitionsDefinition(
        start_date="Tue Jan 11 1:30PM", timezone="America/Los_Angeles", fmt="%a %b %d %I:%M%p"
    )
    assert str(hourly) == "Hourly, starting Thu Jan 11 01:30PM America/Los_Angeles."

    daily = DailyPartitionsDefinition(start_date="2020-01-01", end_offset=1)
    assert str(daily) == "Daily, starting 2020-01-01 UTC. End offsetted by 1 partition."

    static = StaticPartitionsDefinition(["foo", "bar", "baz", "qux"])
    assert str(static) == "'foo', 'bar', 'baz', 'qux'"

    dynamic_fn = lambda _current_time: ["a_partition"]
    dynamic = DynamicPartitionsDefinition(dynamic_fn)
    assert str(dynamic) == "'a_partition'"

    dynamic = DynamicPartitionsDefinition(name="foo")
    assert str(dynamic) == 'Dynamic partitions: "foo"'


def test_static_partition_keys_in_range():
    partitions = StaticPartitionsDefinition(["foo", "bar", "baz", "qux"])
    # Ranges are resolved by list position, inclusive of both endpoints.
    assert partitions.get_partition_keys_in_range(PartitionKeyRange(start="foo", end="baz")) == [
        "foo",
        "bar",
        "baz",
    ]

    with pytest.raises(DagsterInvalidInvocationError):
        partitions.get_partition_keys_in_range(
            PartitionKeyRange(start="foo", end="nonexistent_key")
        )


def METHOD_NAME():
    # The serializable unique identifier must distinguish different key sets
    # and be stable for equal definitions.
    assert (
        StaticPartitionsDefinition(["a", "b", "c"]).get_serializable_unique_identifier()
        != StaticPartitionsDefinition(["a", "b"]).get_serializable_unique_identifier()
    )

    assert (
        StaticPartitionsDefinition(["a", "b", "c"]).get_serializable_unique_identifier()
        == StaticPartitionsDefinition(["a", "b", "c"]).get_serializable_unique_identifier()
    )

    with instance_for_test() as instance:
        dynamic_def = DynamicPartitionsDefinition(name="foo")
        identifier1 = dynamic_def.get_serializable_unique_identifier(
            dynamic_partitions_store=instance
        )
        # Adding a dynamic partition must change the identifier.
        instance.add_dynamic_partitions(dynamic_def.name, ["bar"])
        assert identifier1 != dynamic_def.get_serializable_unique_identifier(
            dynamic_partitions_store=instance
        )

        dynamic_dimension_def = DynamicPartitionsDefinition(name="fruits")
        multipartitions_def = MultiPartitionsDefinition(
            {"a": StaticPartitionsDefinition(["a", "b", "c"]), "b": dynamic_dimension_def}
        )
        serializable_unique_id = multipartitions_def.get_serializable_unique_identifier(instance)
        instance.add_dynamic_partitions(dynamic_dimension_def.name, ["apple"])
        assert serializable_unique_id != multipartitions_def.get_serializable_unique_identifier(
            instance
        )

    # Dimension names participate in the identifier...
    assert (
        MultiPartitionsDefinition(
            {
                "a": StaticPartitionsDefinition(["a", "b", "c"]),
                "b": StaticPartitionsDefinition(["1"]),
            }
        ).get_serializable_unique_identifier()
        != MultiPartitionsDefinition(
            {
                "different_name": StaticPartitionsDefinition(["a", "b", "c"]),
                "b": StaticPartitionsDefinition(["1"]),
            }
        ).get_serializable_unique_identifier()
    )

    # ...and so do the per-dimension key sets.
    assert (
        MultiPartitionsDefinition(
            {
                "a": StaticPartitionsDefinition(["a", "b", "c"]),
                "b": StaticPartitionsDefinition(["1"]),
            }
        ).get_serializable_unique_identifier()
        != MultiPartitionsDefinition(
            {
                "a": StaticPartitionsDefinition(["a", "b"]),
                "b": StaticPartitionsDefinition(["1"]),
            }
        ).get_serializable_unique_identifier()
    )


def test_static_partitions_subset():
    partitions = StaticPartitionsDefinition(["foo", "bar", "baz", "qux"])
    subset = partitions.empty_subset()
    assert len(subset) == 0
    assert "bar" not in subset
    with_some_partitions = subset.with_partition_keys(["foo", "bar"])
    assert with_some_partitions.get_partition_keys_not_in_subset() == {"baz", "qux"}
    # Subsets round-trip through serialize/deserialize.
    serialized = with_some_partitions.serialize()
    deserialized = partitions.deserialize_subset(serialized)
    assert deserialized.get_partition_keys_not_in_subset() == {"baz", "qux"}
    assert len(with_some_partitions) == 2
    assert len(deserialized) == 2
    assert "bar" in with_some_partitions


def test_static_partitions_invalid_chars():
    with pytest.raises(DagsterInvalidDefinitionError):
        StaticPartitionsDefinition(["foo...bar"])
    with pytest.raises(DagsterInvalidDefinitionError, match="n"):
        StaticPartitionsDefinition(["foo\nfoo"])
    with pytest.raises(DagsterInvalidDefinitionError, match="b"):
        StaticPartitionsDefinition(["foo\bfoo"])


def test_run_request_for_partition_invalid_with_dynamic_partitions():
    # run_request_for_partition cannot resolve keys for dynamic partitions.
    @job(partitions_def=DynamicPartitionsDefinition(name="foo"))
    def dynamic_partitions_job():
        pass

    with pytest.raises(CheckError, match="not supported for dynamic partitions"):
        dynamic_partitions_job.run_request_for_partition("nonexistent")

    asset_job = define_asset_job("my_job", partitions_def=DynamicPartitionsDefinition(name="foo"))
    with pytest.raises(CheckError, match="not supported for dynamic partitions"):
        asset_job.run_request_for_partition("nonexistent")
5,642
minutes parse links
import datetime

import dateutil.parser
from city_scrapers_core.constants import COMMISSION
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider


class ChiNorthRiverMentalHealthSpider(CityScrapersSpider):
    """Scrapes Governing Commission meetings from the North River EMHSP site.

    Two pages are crawled: the minutes archive (past meetings) and the index
    page (the upcoming meeting announcement).
    """

    name = "chi_north_river_mental_health"
    agency = (
        "North River Expanded Mental Health Services Program and Governing Commission"
    )
    timezone = "America/Chicago"
    main_url = "https://www.northriverexpandedmentalhealthservicescommission.org"
    start_urls = [
        f"{main_url}/minutes.html",
        f"{main_url}/index.html",
    ]

    def parse(self, response):
        """
        `parse` should always `yield` Meeting items.

        Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping
        needs.
        """
        # Dispatch on which start URL produced this response.
        if response.url == self.start_urls[0]:
            yield from self._parse_minutes(response)
        else:
            yield from self._parse_index(response)

    def _parse_minutes(self, response):
        # Each anchor in the minutes archive corresponds to one past meeting;
        # anchors whose text does not parse to a date are skipped.
        for item in response.xpath('.//div[@class="wsite-section-elements"]//a'):
            valid_start = self._minutes_parse_start(item)
            if not valid_start:
                continue
            meeting = Meeting(
                title="Governing Commission",
                description="",
                classification=COMMISSION,
                start=valid_start,
                end=None,
                all_day=False,
                time_notes="",
                location=self._minutes_parse_location(item),
                links=self.METHOD_NAME(item, response),
                source=response.url,
            )

            meeting["status"] = self._get_status(meeting)
            meeting["id"] = self._get_id(meeting)

            yield meeting

    def _parse_index(self, response):
        # The index page announces a single upcoming meeting.
        item = response.css("td:nth-child(2) div:nth-child(2)")
        meeting = Meeting(
            title="Governing Commission",
            description="",
            classification=COMMISSION,
            start=self._index_parse_start(item),
            end=None,
            all_day=False,
            time_notes="",
            location=self._index_parse_location(item),
            links=self._index_parse_links(item, response),
            source=response.url,
        )

        meeting["status"] = self._get_status(meeting)
        meeting["id"] = self._get_id(meeting)

        yield meeting

    def _minutes_parse_start(self, item):
        """Parse start datetime as a naive datetime object."""
        date_components = item.re(
            "(?P<month>[A-Za-z\\.]+)\\ "
            "(?P<day>[0-9]+)(th)?,((\\ |\\xa0)?)"
            "(?P<year>[0-9]+)?"
        )
        try:
            # The following special cases patch malformed dates that appear
            # verbatim on the source site.
            if date_components[0] == "anuary":
                date_components[0] = "January"
            date_str = (
                f"{date_components[-1]} {date_components[0]} {date_components[1]}"
            )
            if date_str == "20 May 20":
                date_str = "20 May 2015"
            elif date_str == " September 21":
                date_str = "2016 September 21"
            elif date_str == "201 July 15":
                date_str = "2015 July 15"
            # Meetings are assumed to start at 7PM.
            return dateutil.parser.parse(f"{date_str} 7PM")
        except IndexError:
            # Anchor text did not match the date pattern.
            return None

    def _index_parse_start(self, item):
        date_components = item.re(
            "<br>Date: (?P<date>.*)<br>Time: (?P<time>.*)<br>Place:"
        )
        return dateutil.parser.parse(" ".join(date_components))

    def _minutes_parse_location(self, item):
        """Parse or generate location."""
        # The commission moved offices in March 2017.
        start = self._minutes_parse_start(item)
        if start < datetime.datetime(2017, 3, 15):
            return {
                "address": "3857 N. Kostner Avenue Chicago, IL 60641",
                "name": "St. John Episcopal Church Parish Hall",
            }
        else:
            return {
                "address": "3525 W. Peterson Ave, #306 Chicago, IL 60659",
                "name": "North River EMHSP governing commission office",
            }

    def _index_parse_location(self, item):
        place = item.re("<br>Place: (?P<location>[a-zA-Z0-9 ]+(?!\\\\<br\\\\>))")[0]
        # Leaving "name" value empty for now..
        return {"name": "", "address": place}

    def METHOD_NAME(self, item, response):
        """Parse or generate links."""
        return [{"href": response.urljoin(item.attrib["href"]), "title": "Minutes"}]

    def _index_parse_links(self, item, response):
        return [
            {
                "title": link.css("::text").get(),
                "href": response.urljoin(link.attrib["href"]),
            }
            for link in item.css("a")
        ]
5,643
size hint
from PySide6.QtCore import QSize, Qt
from PySide6.QtGui import QPainter, QPen
from PySide6.QtWidgets import QFrame, QHBoxLayout, QLabel, QLineEdit, QScrollArea, QVBoxLayout, QWidget

from angrmanagement.config import Conf

from .qast_viewer import QASTViewer


class AddressPiece:
    """Display piece: a memory address rendered at the start of a row."""

    __slots__ = ["address"]

    def __init__(self, address):
        self.address = address


class NewLinePiece:
    """Display piece: marker that forces a line break during painting."""

    pass


class QMemoryView(QWidget):
    """Custom-painted widget that renders rows of memory bytes as AST views."""

    def __init__(self, state, workspace, parent=None):
        super().__init__(parent)

        self.workspace = workspace
        self.state = state
        # Grid dimensions; set by the owning QMemoryViewer before display.
        self.cols = None
        self.rows = None

        # The current address being displayed. Must be set through .address
        self._address = None
        # Flat list of AddressPiece / QASTViewer / NewLinePiece to paint.
        self._objects = []

    @property
    def address(self):
        return self._address

    @address.setter
    def address(self, v):
        # Rebuild the display objects only when the address actually changes.
        if v != self._address:
            self._address = v
            self._reload_objects()

    def paintEvent(self, event):
        if self.address is None:
            return

        MARGIN_LEFT = 5
        MARGIN_TOP = 5
        LINE_MARGIN = 3

        painter = QPainter(self)
        painter.setPen(QPen(Qt.black, 1))
        painter.setFont(Conf.symexec_font)

        x = MARGIN_LEFT
        y = MARGIN_TOP

        # Lay the pieces out left-to-right, wrapping at NewLinePiece markers.
        for obj in self._objects:
            obj_type = type(obj)
            if obj_type is NewLinePiece:
                # carriage return
                x = MARGIN_LEFT
                y += Conf.symexec_font_height + LINE_MARGIN
            elif obj_type is AddressPiece:
                # address
                addr_str = "%08x" % obj.address
                painter.drawText(x, y + Conf.symexec_font_ascent, addr_str)
                x += Conf.symexec_font_width * len(addr_str)
                x += 7
            elif obj_type is QASTViewer:
                # AST viewer
                obj.x = x
                obj.y = y
                obj.paint(painter)
                x += obj.width + 2
            else:
                raise TypeError("paintEvent(): Unsupported object type %s." % obj_type)

    def _reload_objects(self):
        """
        Reload addresses and text pieces to be displayed.

        :return: None
        """
        objects = []

        addr_base = self.address

        for row in range(self.rows):
            addr = addr_base + row * self.cols
            # address
            addr_piece = AddressPiece(addr)
            objects.append(addr_piece)

            # QASTViewer objects — one per byte in the row.
            for col in range(self.cols):
                data = self.state.memory.load(addr + col, 1, inspect=False, disable_actions=True)
                ast_viewer = QASTViewer(data, workspace=self.workspace, custom_painting=True, display_size=False)
                objects.append(ast_viewer)

            # end of the line
            newline_piece = NewLinePiece()
            objects.append(newline_piece)

        self._objects = objects


class QMemoryViewer(QFrame):
    """Frame combining an address input box with a scrollable QMemoryView."""

    def __init__(self, state, parent, workspace):
        super().__init__(parent)

        self.workspace = workspace

        self._scrollarea: QScrollArea
        self._txt_addr: QLineEdit
        self._view: QMemoryView

        self._addr = None  # the address to display

        self.state = state

        self._init_widgets()

        self.state.am_subscribe(self._watch_state)

    @property
    def addr(self):
        return self._addr

    @addr.setter
    def addr(self, v):
        if self._addr != v:
            self._addr = v
            self.reload()

    #
    # Overridden methods
    #

    def METHOD_NAME(self, *args, **kwargs):
        # NOTE(review): fixed preferred size — presumably Qt's sizeHint
        # override given the "Overridden methods" section; confirm.
        return QSize(100, 100)

    #
    # Public methods
    #

    def reload(self):
        if self.state.am_none:
            return

        if self.addr is None:
            return

        self._refresh_memory_view()

    #
    # Event handlers
    #

    def _on_address_entered(self):
        address_str = self._txt_addr.text()

        try:
            address = int(address_str, 16)
        except ValueError:
            # Silently ignore non-hex input.
            return

        self.addr = address

    #
    # Private methods
    #

    def _init_widgets(self):
        layout = QVBoxLayout()

        # address
        lbl_addr = QLabel()
        lbl_addr.setText("Address")

        txt_addr = QLineEdit()
        txt_addr.returnPressed.connect(self._on_address_entered)
        self._txt_addr = txt_addr

        top_layout = QHBoxLayout()
        top_layout.addWidget(lbl_addr)
        top_layout.addWidget(txt_addr)

        self._view = QMemoryView(self.state, self.workspace)

        area = QScrollArea()
        self._scrollarea = area
        area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        area.setWidgetResizable(True)

        area.setWidget(self._view)

        layout.addLayout(top_layout)
        layout.addWidget(area)
        layout.setContentsMargins(0, 0, 0, 0)

        self.setLayout(layout)

    def _refresh_memory_view(self):
        # Fixed 16x10 byte grid for now.
        self._view.cols = 16
        self._view.rows = 10
        self._view.address = self.addr
        self._view.repaint()

    def _watch_state(self, **kwargs):
        self.reload()
5,644
generate image
# Shared pytest fixtures for cleanlab Datalab tests: a small text dataset,
# Datalab instances, prediction probabilities, a custom issue manager, and
# image helpers.
import numpy as np
import pandas as pd
import pytest
from datasets import load_dataset
from datasets.arrow_dataset import Dataset
from PIL import Image
from sklearn.neighbors import NearestNeighbors

from cleanlab.datalab.datalab import Datalab

SEED = 42
LABEL_NAME = "star"


@pytest.fixture
def dataset():
    # Five app-store reviews with star ratings; "star" serves as the label.
    data_dict = {
        "id": [
            "7bd227d9-afc9-11e6-aba1-c4b301cdf627",
            "7bd22905-afc9-11e6-a5dc-c4b301cdf627",
            "7bd2299c-afc9-11e6-85d6-c4b301cdf627",
            "7bd22a26-afc9-11e6-9309-c4b301cdf627",
            "7bd22aba-afc9-11e6-8293-c4b301cdf627",
        ],
        "package_name": [
            "com.mantz_it.rfanalyzer",
            "com.mantz_it.rfanalyzer",
            "com.mantz_it.rfanalyzer",
            "com.mantz_it.rfanalyzer",
            "com.mantz_it.rfanalyzer",
        ],
        "review": [
            "Great app! The new version now works on my Bravia Android TV which is great as it's right by my rooftop aerial cable. The scan feature would be useful...any ETA on when this will be available? Also the option to import a list of bookmarks e.g. from a simple properties file would be useful.",
            "Great It's not fully optimised and has some issues with crashing but still a nice app especially considering the price and it's open source.",
            "Works on a Nexus 6p I'm still messing around with my hackrf but it works with my Nexus 6p Trond usb-c to usb host adapter. Thanks!",
            "The bandwidth seemed to be limited to maximum 2 MHz or so. I tried to increase the bandwidth but not possible. I purchased this is because one of the pictures in the advertisement showed the 2.4GHz band with around 10MHz or more bandwidth. Is it not possible to increase the bandwidth? If not it is just the same performance as other free APPs.",
            "Works well with my Hackrf Hopefully new updates will arrive for extra functions",
        ],
        "date": [
            "October 12 2016",
            "August 23 2016",
            "August 04 2016",
            "July 25 2016",
            "July 22 2016",
        ],
        "star": [4, 4, 5, 3, 5],
        "version_id": [1487, 1487, 1487, 1487, 1487],
    }
    return Dataset.from_dict(data_dict)


@pytest.fixture
def label_name():
    return LABEL_NAME


@pytest.fixture
def lab(dataset, label_name):
    return Datalab(data=dataset, label_name=label_name)


@pytest.fixture
def large_lab():
    # Seeded synthetic dataset (100 points, 2 classes) with normalized
    # pred_probs and a precomputed KNN distance graph stashed in lab.info.
    np.random.seed(SEED)
    N = 100
    K = 2
    data = np.random.rand(N, 2)
    labels = np.random.randint(0, K, size=N)
    pred_probs = np.random.rand(N, K)
    pred_probs /= pred_probs.sum(axis=1, keepdims=True)
    lab = Datalab(
        data={"features": data, "label": labels, "pred_probs": pred_probs}, label_name="label"
    )
    knn = NearestNeighbors(n_neighbors=25, metric="euclidean").fit(data)
    knn_graph = knn.kneighbors_graph(mode="distance")
    lab.info["statistics"]["unit_test_knn_graph"] = knn_graph
    return lab


@pytest.fixture
def pred_probs(dataset):
    # NOTE: not normalized per row, unlike large_lab's pred_probs.
    np.random.seed(SEED)
    return np.random.rand(len(dataset), 3)


@pytest.fixture
def custom_issue_manager():
    # Returns the class itself (not an instance) for tests that register
    # custom issue managers.
    from cleanlab.datalab.internal.issue_manager.issue_manager import IssueManager

    class CustomIssueManager(IssueManager):
        issue_name = "custom_issue"

        def find_issues(self, custom_argument: int = 1, **_) -> None:
            # Flag example as an issue if the custom argument equals its index
            scores = [
                abs(i - custom_argument) / (i + custom_argument)
                for i in range(len(self.datalab.data))
            ]
            self.issues = pd.DataFrame(
                {
                    f"is_{self.issue_name}_issue": [
                        i == custom_argument for i in range(len(self.datalab.data))
                    ],
                    self.issue_score_key: scores,
                },
            )
            summary_score = np.mean(scores)
            self.summary = self.make_summary(score=summary_score)

    return CustomIssueManager


def METHOD_NAME():
    # NOTE(review): unlike its siblings this function is NOT decorated with
    # @pytest.fixture — presumably a plain helper that builds a random RGB
    # image; confirm intended usage.
    arr = np.random.randint(low=0, high=256, size=(300, 300, 3), dtype=np.uint8)
    img = Image.fromarray(arr, mode="RGB")
    return img


@pytest.fixture
def image_dataset():
    # Loads a local image-folder dataset checked into the test tree.
    data_path = "./tests/datalab/data"
    dataset = load_dataset(
        "imagefolder",
        data_dir=data_path,
        split="train",
    )
    return dataset
5,645
create rule
import json class Trace(object): """ An llbuild build system trace """ # Get a cached trace. @classmethod def frompath(cls, path): db = cls._traces.get(path) if db is None: cls._traces[path] = db = Trace(path) return db _traces = {} def __init__(self, path): self.events = [] self.tasks = {} self.rules = {} # FIXME: Move this format to just JSON for ease of loading. with open(path) as f: lines = list(f) print((lines[0], lines[-1])) assert(lines.pop(0) == '[\n') assert(lines.pop(-1) == ']\n') for line in lines: assert(line.startswith('{ ')) assert(line.endswith('},\n')) line = line[2:-3] event_data = [eval(s) for s in line.split(', ')] handler = _event_handlers.get(event_data[0]) if handler is None: raise NotImplementedError( "unknown event: {}".format(event_data[0])) event = handler(self, event_data[1:]) if event: self.events.append(event) # MARK: Event Parsing class Rule(object): def __init__(self, data): (name, key) = data self.name = name self.key = key class Task(object): def __init__(self, data): (name,) = data self.name = name self.rule = None ### class Event(object): @property def isReadiedTask(self): return isinstance(self, ReadiedTask) class BuildStarted(Event): def __init__(self, trace, data): pass class BuildEnded(Event): def __init__(self, trace, data): pass class HandlingBuildInputRequest(Event): def __init__(self, trace, data): (rule,) = data self.rule = trace.rules[rule] class CheckingRuleNeedsToRun(Event): def __init__(self, trace, data): (rule,) = data self.rule = trace.rules[rule] class RuleNeedsToRun(Event): class NeverBuilt(Event): pass class InvalidValue(Event): pass class InputRebuilt(Event): def __init__(self, inputRule): self.inputRule = inputRule def __init__(self, trace, data): self.rule = trace.rules[data[0]] if data[1] == 'invalid-value': (_, _) = data self.reason = RuleNeedsToRun.InvalidValue() elif data[1] == 'never-built': (_, _) = data self.reason = RuleNeedsToRun.NeverBuilt() elif data[1] == 'input-rebuilt': (_, _, inputRule) = data 
self.reason = RuleNeedsToRun.InputRebuilt( trace.rules[inputRule]) else: raise NotImplementedError("unknown reason: {}".format(data)) class RuleDoesNotNeedToRun(Event): def __init__(self, trace, data): (rule,) = data self.rule = trace.rules[rule] class CreatedTaskForRule(Event): def __init__(self, trace, data): (task, rule) = data self.task = trace.tasks[task] self.rule = trace.rules[rule] self.task.rule = self.rule class HandlingTaskInputRequest(Event): def __init__(self, trace, data): (task, rule) = data self.task = trace.tasks[task] self.rule = trace.rules[rule] class AddedRulePendingTask(Event): def __init__(self, trace, data): (rule, task) = data self.rule = trace.rules[rule] self.task = trace.tasks[task] class RuleScheduledForScanning(Event): def __init__(self, trace, data): (rule,) = data self.rule = trace.rules[rule] class PausedInputRequestForRuleScan(Event): def __init__(self, trace, data): (rule,) = data self.rule = trace.rules[rule] class ReadyingTaskInputRequest(Event): def __init__(self, trace, data): (task, rule) = data self.task = trace.tasks[task] self.rule = trace.rules[rule] class CompletedTaskInputRequest(Event): def __init__(self, trace, data): (task, rule) = data self.task = trace.tasks[task] self.rule = trace.rules[rule] class UpdatedTaskWaitCount(Event): def __init__(self, trace, data): (task, count) = data self.task = trace.tasks[task] self.count = count class RuleScanningDeferredOnInput(Event): def __init__(self, trace, data): (rule, inputRule) = data self.rule = trace.rules[rule] self.inputRule = trace.rules[inputRule] class RuleScanningDeferredOnTask(Event): def __init__(self, trace, data): (rule, inputTask) = data self.rule = trace.rules[rule] self.inputTask = trace.tasks[inputTask] class RuleScanningNextInput(Event): def __init__(self, trace, data): (rule, inputRule) = data self.rule = trace.rules[rule] self.inputRule = trace.rules[inputRule] class UnblockedTask(Event): def __init__(self, trace, data): (task,) = data self.task = 
trace.tasks[task] class ReadiedTask(Event): def __init__(self, trace, data): (task, rule) = data self.task = trace.tasks[task] assert(self.task.rule is trace.rules[rule]) class FinishedTask(Event): def __init__(self, trace, data): (task, rule, effect) = data self.task = trace.tasks[task] assert(self.task.rule is trace.rules[rule]) self.effect = effect def METHOD_NAME(trace, data): rule = Rule(data) trace.rules[rule.name] = rule def _create_task(trace, data): task = Task(data) trace.tasks[task.name] = task _event_handlers = { "new-rule": METHOD_NAME, "new-task": _create_task, "build-started": BuildStarted, "build-ended": BuildEnded, "handling-build-input-request": HandlingBuildInputRequest, "checking-rule-needs-to-run": CheckingRuleNeedsToRun, "rule-needs-to-run": RuleNeedsToRun, "rule-does-not-need-to-run": RuleDoesNotNeedToRun, "created-task-for-rule": CreatedTaskForRule, "handling-task-input-request": HandlingTaskInputRequest, "added-rule-pending-task": AddedRulePendingTask, "rule-scheduled-for-scanning": RuleScheduledForScanning, "paused-input-request-for-rule-scan": RuleScheduledForScanning, "readying-task-input-request": ReadyingTaskInputRequest, "completed-task-input-request": CompletedTaskInputRequest, "updated-task-wait-count": UpdatedTaskWaitCount, "rule-scanning-deferred-on-input": RuleScanningDeferredOnInput, "rule-scanning-deferred-on-task": RuleScanningDeferredOnTask, "rule-scanning-next-input": RuleScanningNextInput, "unblocked-task": UnblockedTask, "readied-task": ReadiedTask, "finished-task": FinishedTask, }
5,646
arg names
# Copyright: Ankitects Pty Ltd and contributors
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

"""
Code for generating hooks.
"""

import os
import subprocess
import sys
from dataclasses import dataclass
from operator import attrgetter
from typing import Optional

sys.path.append("pylib/anki/_vendor")

import stringcase


@dataclass
class Hook:
    """Declarative description of one hook/filter; renders its Python code.

    Each method returning a string produces a fragment of the generated
    source file; `code()` assembles the per-hook class and its singleton
    instance.
    """

    # the name of the hook. _filter or _hook is appended automatically.
    name: str
    # string of the typed arguments passed to the callback, eg
    # ["kind: str", "val: int"]
    args: Optional[list[str]] = None
    # string of the return type. if set, hook is a filter.
    return_type: Optional[str] = None
    # if add-ons may be relying on the legacy hook name, add it here
    legacy_hook: Optional[str] = None
    # if legacy hook takes no arguments but the new hook does, set this
    legacy_no_args: bool = False
    # if the hook replaces a deprecated one, add its name here
    replaces: Optional[str] = None
    # arguments that the hook being replaced took
    replaced_hook_args: Optional[list[str]] = None
    # docstring to add to hook class
    doc: Optional[str] = None

    def callable(self) -> str:
        "Convert args into a Callable."
        types = []
        for arg in self.args or []:
            # each entry looks like "name: type"; only the type goes into
            # the Callable signature (quoted, as a forward reference)
            (name, type) = arg.split(":")
            type = f'"{type.strip()}"'
            types.append(type)
        types_str = ", ".join(types)
        return f"Callable[[{types_str}], {self.return_type or 'None'}]"

    def METHOD_NAME(self, args: Optional[list[str]]) -> list[str]:
        """Return the bare parameter names from entries like "kind: str"."""
        names = []
        for arg in args or []:
            if not arg:
                continue
            (name, type) = arg.split(":")
            names.append(name.strip())
        return names

    def full_name(self) -> str:
        """Hook name with its kind suffix, eg "card_will_show_filter"."""
        return f"{self.name}_{self.kind()}"

    def kind(self) -> str:
        """"filter" when a return type is declared, else "hook"."""
        if self.return_type is not None:
            return "filter"
        else:
            return "hook"

    def classname(self) -> str:
        """Name of the generated (private) per-hook class."""
        return f"_{stringcase.pascalcase(self.full_name())}"

    def list_code(self) -> str:
        """Generated declaration of the callback-list attribute."""
        return f"""\
    _hooks: list[{self.callable()}] = []
"""

    def code(self) -> str:
        """Generated class body plus its module-level singleton instance."""
        appenddoc = f"({', '.join(self.args or [])})"
        if self.doc:
            classdoc = f"    '''{self.doc}'''\n"
        else:
            classdoc = ""
        code = f"""\
class {self.classname()}:
{classdoc}{self.list_code()}
    def append(self, callback: {self.callable()}) -> None:
        '''{appenddoc}'''
        self._hooks.append(callback)

    def remove(self, callback: {self.callable()}) -> None:
        if callback in self._hooks:
            self._hooks.remove(callback)

    def count(self) -> int:
        return len(self._hooks)

{self.fire_code()}

{self.name} = {self.classname()}()
"""
        return code

    def fire_code(self) -> str:
        """Generated __call__ implementation, per hook/filter kind."""
        if self.return_type is not None:
            # filter
            return self.filter_fire_code()
        else:
            # hook
            return self.hook_fire_code()

    def legacy_args(self) -> str:
        """Argument list for the legacy runHook/runFilter call."""
        if self.legacy_no_args:
            # hook name only
            return f'"{self.legacy_hook}"'
        else:
            return ", ".join([f'"{self.legacy_hook}"'] + self.METHOD_NAME(self.args))

    def replaced_args(self) -> str:
        """Generated invocation of the deprecated hook this one replaces."""
        args = ", ".join(self.METHOD_NAME(self.replaced_hook_args))
        return f"{self.replaces}({args})"

    def hook_fire_code(self) -> str:
        """__call__ body for a hook: run every callback for side effects."""
        METHOD_NAME = self.METHOD_NAME(self.args)
        args_including_self = ["self"] + (self.args or [])
        out = f"""\
    def __call__({", ".join(args_including_self)}) -> None:
        for hook in self._hooks:
            try:
                hook({", ".join(METHOD_NAME)})
            except:
                # if the hook fails, remove it
                self._hooks.remove(hook)
                raise
"""
        if self.replaces and self.legacy_hook:
            raise Exception(
                f"Hook {self.name} replaces {self.replaces} and "
                "must therefore not define a legacy hook."
            )
        elif self.replaces:
            out += f"""\
        if {self.replaces}.count() > 0:
            print(
                "The hook {self.replaces} is deprecated.\\n"
                "Use {self.name} instead."
            )
            {self.replaced_args()}
"""
        elif self.legacy_hook:
            # don't run legacy hook if replaced hook exists
            # otherwise the legacy hook will be run twice
            out += f"""\
        # legacy support
        anki.hooks.runHook({self.legacy_args()})
"""
        return f"{out}\n\n"

    def filter_fire_code(self) -> str:
        """__call__ body for a filter: thread the first argument through
        every callback and return the final value."""
        METHOD_NAME = self.METHOD_NAME(self.args)
        args_including_self = ["self"] + (self.args or [])
        out = f"""\
    def __call__({", ".join(args_including_self)}) -> {self.return_type}:
        for filter in self._hooks:
            try:
                {METHOD_NAME[0]} = filter({", ".join(METHOD_NAME)})
            except:
                # if the hook fails, remove it
                self._hooks.remove(filter)
                raise
"""
        if self.replaces and self.legacy_hook:
            raise Exception(
                f"Hook {self.name} replaces {self.replaces} and "
                "must therefore not define a legacy hook."
            )
        elif self.replaces:
            out += f"""\
        if {self.replaces}.count() > 0:
            print(
                "The hook {self.replaces} is deprecated.\\n"
                "Use {self.name} instead."
            )
            {METHOD_NAME[0]} = {self.replaced_args()}
"""
        elif self.legacy_hook:
            # don't run legacy hook if replaced hook exists
            # otherwise the legacy hook will be run twice
            out += f"""\
        # legacy support
        {METHOD_NAME[0]} = anki.hooks.runFilter({self.legacy_args()})
"""
        out += f"""\
        return {METHOD_NAME[0]}
"""
        return f"{out}\n\n"


def write_file(path: str, hooks: list[Hook], prefix: str, suffix: str):
    """Render all hooks (sorted by name) between prefix/suffix and write the
    result to *path*, then format it with black."""
    hooks.sort(key=attrgetter("name"))
    code = f"{prefix}\n"
    for hook in hooks:
        code += hook.code()
    code += f"\n{suffix}"
    # work around issue with latest black
    if sys.platform == "win32" and "HOME" in os.environ:
        os.environ["USERPROFILE"] = os.environ["HOME"]
    with open(path, "wb") as file:
        file.write(code.encode("utf8"))
    subprocess.run([sys.executable, "-m", "black", "-q", path], check=True)
5,647
update bnf map
import csv

from matrixstore.build.import_practice_stats import (
    parse_practice_statistics_csv,
    write_practice_stats,
)
from matrixstore.build.import_prescribing import (
    parse_prescribing_csv,
    write_prescribing,
)
from matrixstore.build.init_db import SCHEMA_SQL, generate_dates, import_dates
from matrixstore.build.precalculate_totals import precalculate_totals_for_db
from matrixstore.build.METHOD_NAME import (
    delete_presentations_with_no_prescribing,
    move_values_from_old_code_to_new,
)


def import_test_data_fast(sqlite_conn, data_factory, end_date, months=None):
    """
    Load the fixtures held by `data_factory` straight into `sqlite_conn`,
    touching no external services (BigQuery, Google Cloud Storage) and --
    when the SQLite database lives in memory -- no disk either.
    """
    dates = generate_dates(end_date, months=months)
    # Take manual control of transactions for the duration of the import by
    # switching off the sqlite3 module's implicit transaction handling.
    saved_isolation_level = sqlite_conn.isolation_level
    sqlite_conn.isolation_level = None
    init_db(sqlite_conn, data_factory, dates)
    import_practice_stats(sqlite_conn, data_factory, dates)
    import_prescribing(sqlite_conn, data_factory, dates)
    METHOD_NAME(sqlite_conn, data_factory)
    precalculate_totals_for_db(sqlite_conn)
    sqlite_conn.isolation_level = saved_isolation_level
    sqlite_conn.commit()


def init_db(sqlite_conn, data_factory, dates):
    """Create the schema and seed the date and practice tables."""
    sqlite_conn.executescript(SCHEMA_SQL)
    import_dates(sqlite_conn, dates)
    active_codes = _get_active_practice_codes(data_factory, dates)
    sqlite_conn.executemany(
        "INSERT INTO practice (offset, code) VALUES (?, ?)", enumerate(active_codes)
    )


def import_practice_stats(sqlite_conn, data_factory, dates):
    """Write the in-range practice statistics into the database."""
    stats_rows = list(_filter_by_date(data_factory.practice_statistics, dates))
    if stats_rows:
        # The parser cannot cope with a CSV that lacks its expected header
        # row, so only invoke it when there is at least one record.
        parsed_stats = parse_practice_statistics_csv(_dicts_to_csv(stats_rows))
    else:
        parsed_stats = []
    write_practice_stats(sqlite_conn, parsed_stats)


def import_prescribing(sqlite_conn, data_factory, dates):
    """Write the in-range prescribing data into the database."""
    in_range = _filter_by_date(data_factory.prescribing, dates)
    ordered = sorted(
        in_range, key=lambda row: (row["bnf_code"], row["practice"], row["month"])
    )
    write_prescribing(sqlite_conn, parse_prescribing_csv(_dicts_to_csv(ordered)))


def METHOD_NAME(sqlite_conn, data_factory):
    """Apply BNF code remappings, then drop presentations left without data."""
    cursor = sqlite_conn.cursor()
    for mapping in data_factory.bnf_map:
        move_values_from_old_code_to_new(
            cursor, mapping["former_bnf_code"], mapping["current_bnf_code"]
        )
    delete_presentations_with_no_prescribing(cursor)


def _get_active_practice_codes(data_factory, dates):
    """Return, sorted, the codes of practices with any in-range activity."""
    codes = {
        row["practice"] for row in _filter_by_date(data_factory.prescribing, dates)
    }
    codes.update(
        row["practice"]
        for row in _filter_by_date(data_factory.practice_statistics, dates)
    )
    return sorted(codes)


def _filter_by_date(items, dates):
    """Lazily select items whose "month" (date part) falls within `dates`."""
    return (item for item in items if item["month"][:10] in dates)


# `csv.writer` wants a file-like object to write its output to, but we just
# want each line of output as it's produced. Aliasing `write` to `append`
# lets a plain list stand in, and the lines can be popped off as they land.
class ListFile(list):
    write = list.append


def _dicts_to_csv(dicts):
    """
    Render an iterable of dictionaries (assumed to share the same keys) as
    CSV-formatted strings, header line first.
    """
    buffer = ListFile()
    writer = None
    for row in dicts:
        if writer is None:
            writer = csv.DictWriter(buffer, row.keys())
            writer.writeheader()
            yield buffer.pop()
        writer.writerow(row)
        yield buffer.pop()
5,648
check port free
import logging
import os
import shutil

from urllib3.exceptions import NewConnectionError
from requests.exceptions import ConnectionError
from cryptoadvance.specter.specter_error import SpecterError
from ...config import BaseConfig
from ..specter_migrator import SpecterMigration
from ...helpers import load_jsons
from ...managers.node_manager import NodeManager
import requests

logger = logging.getLogger(__name__)


class SpecterMigration_0001(SpecterMigration):
    """Migrates a pre-v1.6.1 single internal bitcoin node into the
    multi-node layout (see `description`)."""

    version = "v1.6.1"  # the version this migration has been rolled out
    # irrelevant though because we'll execute this script in any case
    # as we can't have yet a say on when specter has been started first

    def should_execute(self):
        # This Migration cannot rely on the default-mechanism as the
        # migration_framework was not in place when the functionality has
        # been implemented
        return True

    @property
    def description(self) -> str:
        return """Single-Node migration:
        In v1.3.1 Single Node implementation has been implemented
        Later we had multiple nodes. This migrates the single
        installation to one of many. Effectively it will:
        * Check whether an internal node was existing in ~/.specter/.bitcoin
        * Check whether a new internal default node (bitcoin/main) is NOT existing
        * Move the ~/.specter/.bitcoin to ~/.specter/nodes/specter_bitcoin/.bitcoin-main
        * Creates a json-definition in ~/.specter/nodes/specter_bitcoin.json
        """

    def execute(self):
        """Perform the migration; a no-op when there is nothing to migrate
        or when the default port is already taken."""
        source_folder = os.path.join(self.data_folder, ".bitcoin")
        if not os.path.isdir(source_folder):
            logger.info(
                "No .bitcoin directory found in {self.data_folder}. Nothing to do"
            )
            return
        if not os.path.isdir(os.path.join(self.data_folder, "bitcoin-binaries")):
            raise SpecterError(
                "Could not proceed with migration as bitcoin-binaries are not existing."
            )
        if not self.METHOD_NAME():
            logger.error(
                "There is already a Node with the default port configured or running. Won't migrate!"
            )
            return
        # The version will be the version shipped with specter
        bitcoin_version = BaseConfig.INTERNAL_BITCOIND_VERSION
        logger.info(f".bitcoin directory detected in {self.data_folder}. Migrating ...")
        recommended_name = self._find_appropriate_name()
        target_folder = os.path.join(self.data_folder, "nodes", recommended_name)
        logger.info(f"Migrating to folder {target_folder}")
        os.makedirs(target_folder)
        logger.info(f"Moving .bitcoin to folder {target_folder}")
        shutil.move(source_folder, os.path.join(target_folder, ".bitcoin-main"))
        if os.path.isdir(os.path.join(source_folder, "bitcoin.conf")):
            logger.info("Removing bitcoin.conf file")
            os.remove(os.path.join(source_folder, "bitcoin.conf"))
        definition_file = os.path.join(target_folder, "specter_bitcoin.json")
        logger.info(
            f"Creating {definition_file}. This will cause some warnings and even errors about not being able to connect to the node which can be ignored."
        )
        nm = NodeManager(
            data_folder=os.path.join(self.data_folder, "nodes"),
            bitcoind_path=os.path.join(
                self.data_folder, "bitcoin-binaries", "bin", "bitcoind"
            ),
            internal_bitcoind_version=bitcoin_version,
        )
        # Should create a json (see fullpath) like the one below:
        node = nm.add_internal_node(recommended_name)
        # {
        #     "name": "Specter Bitcoin",
        #     "alias": "specter_bitcoin",
        #     "autodetect": false,
        #     "datadir": "/home/someuser/.specter/nodes/specter_bitcoin/.bitcoin-main",
        #     "user": "bitcoin",
        #     "password": "3ah0yc-2dDEwUSqHuuZi-w",
        #     "port": 8332,
        #     "host": "localhost",
        #     "protocol": "http",
        #     "external_node": false,
        #     "fullpath": "/home/someuser/.specter/nodes/specter_bitcoin.json",
        #     "bitcoind_path": "/home/someuser/.specter/bitcoin-binaries/bin/bitcoind",
        #     "bitcoind_network": "main",
        #     "version": "0.21.1"
        # }

    def _find_appropriate_name(self):
        """Pick an unused node alias; raises if the migration appears to
        have been run twice already."""
        if not os.path.isdir(os.path.join(self.data_folder, "nodes")):
            return "specter_bitcoin"
        if not os.path.isdir(
            os.path.join(self.data_folder, "nodes", "specter_bitcoin")
        ):
            return "specter_bitcoin"
        # Hmm, now it gets a bit trickier
        if not os.path.isdir(
            os.path.join(self.data_folder, "nodes", "specter_migrated")
        ):
            return "specter_migrated"
        # Now it's getting fishy
        raise SpecterError(
            "I found a node called 'specter_migrated'. This migration script should not run twice."
        )

    def METHOD_NAME(self, port=8332):
        """Return True when nothing is listening on *port* and no configured
        internal node claims it."""
        # For external nodes, we assume that they are already running: if an
        # HTTP request succeeds, something is listening -> port is taken.
        try:
            requests.get(f"http://localhost:{port}")
            return False
        except (ConnectionRefusedError, ConnectionError, NewConnectionError):
            pass
        # Now let's check internal Nodes.
        # BUG FIX: "nodes" is a directory; the original tested it with
        # os.path.isfile(), which is always False here (compare
        # _find_appropriate_name, which uses isdir), so the port-clash
        # check below was silently skipped.
        if os.path.isdir(os.path.join(self.data_folder, "nodes")):
            configs = load_jsons(os.path.join(self.data_folder, "nodes"))
            # BUG FIX: iterate the loaded definitions, not the dict keys
            # (keys are plain strings and have no port field).
            # NOTE(review): assumes load_jsons returns a mapping of
            # alias -> node-definition dict -- confirm against helpers.
            ports = [node.get("port") for node in configs.values()]
            if port in ports:
                return False
        return True
5,649
get tags
from functools import partial

from i18nfield.rest_framework import I18nAwareModelSerializer
from rest_framework.serializers import (
    ModelSerializer,
    SerializerMethodField,
    SlugRelatedField,
)

from pretalx.api.serializers.question import AnswerSerializer
from pretalx.api.serializers.speaker import SubmitterOrgaSerializer, SubmitterSerializer
from pretalx.schedule.models import Schedule, TalkSlot
from pretalx.submission.models import Resource, Submission, SubmissionStates, Tag


class ResourceSerializer(ModelSerializer):
    """Serializes a submission resource as its URL plus description."""

    resource = SerializerMethodField()

    @staticmethod
    def get_resource(obj):
        # expose the resource's URL under the "resource" key
        return obj.url

    class Meta:
        model = Resource
        fields = ("resource", "description")


class SlotSerializer(I18nAwareModelSerializer):
    """Serializes a talk slot with its room name and localized end time."""

    room = SlugRelatedField(slug_field="name", read_only=True)
    end = SerializerMethodField()

    @staticmethod
    def get_end(obj):
        # use the event-local end time rather than the stored value
        return obj.local_end

    class Meta:
        model = TalkSlot
        fields = ("room_id", "room", "start", "end")


class BreakSerializer(SlotSerializer):
    """Slot serializer variant for breaks; adds the break description."""

    class Meta:
        model = TalkSlot
        fields = ("room", "room_id", "start", "end", "description")


class SubmissionSerializer(I18nAwareModelSerializer):
    """Public-facing submission serializer.

    Speaker visibility and anonymisation are controlled by the
    `can_view_speakers` / `event` / `questions` kwargs popped in __init__.
    The title/abstract/description getters are bound dynamically there so
    that anonymised values can be substituted per-field.
    """

    submission_type = SlugRelatedField(slug_field="name", read_only=True)
    track = SlugRelatedField(slug_field="name", read_only=True)
    slot = SlotSerializer(
        TalkSlot.objects.none().filter(is_visible=True), read_only=True
    )
    duration = SerializerMethodField()
    speakers = SerializerMethodField()
    resources = ResourceSerializer(Resource.objects.none(), read_only=True, many=True)
    title = SerializerMethodField()
    abstract = SerializerMethodField()
    description = SerializerMethodField()
    # subclasses swap this for the orga-level speaker serializer
    speaker_serializer_class = SubmitterSerializer

    @staticmethod
    def get_duration(obj):
        return obj.get_duration()

    def get_speakers(self, obj):
        # speakers are public once the talk is confirmed and scheduled in a
        # visible slot; otherwise only privileged viewers see them
        has_slots = (
            obj.slots.filter(is_visible=True)
            and obj.state == SubmissionStates.CONFIRMED
        )
        if has_slots or self.can_view_speakers:
            return self.speaker_serializer_class(
                obj.speakers.all(),
                many=True,
                context=self.context,
                event=self.event,
            ).data
        return []

    def get_attribute(self, obj, attribute=None):
        # For unprivileged viewers, prefer the anonymised value and fall
        # back to the real one when no anonymised value is present.
        # NOTE(review): bound via partial() in __init__ as the getter for
        # title/abstract/description -- confirm it is not also hit through
        # DRF's own Field.get_attribute path.
        if self.can_view_speakers:
            return getattr(obj, attribute, None)
        return obj.anonymised.get(attribute) or getattr(obj, attribute, None)

    def __init__(self, *args, **kwargs):
        # custom kwargs must be popped before DRF's __init__ sees them
        self.can_view_speakers = kwargs.pop("can_view_speakers", False)
        self.event = kwargs.pop("event", None)
        questions = kwargs.pop("questions", [])
        # "all" is passed through verbatim; otherwise falsy entries are dropped
        self.questions = (
            questions if questions == "all" else [q for q in questions if q]
        )
        super().__init__(*args, **kwargs)
        # route these fields through get_attribute() for anonymisation
        for field in ("title", "abstract", "description"):
            setattr(self, f"get_{field}", partial(self.get_attribute, attribute=field))

    class Meta:
        model = Submission
        fields = [
            "code",
            "speakers",
            "title",
            "submission_type",
            "submission_type_id",
            "track",
            "track_id",
            "state",
            "abstract",
            "description",
            "duration",
            "slot_count",
            "do_not_record",
            "is_featured",
            "content_locale",
            "slot",
            "image",
            "resources",
        ]


class TagSerializer(I18nAwareModelSerializer):
    """Serializes a submission tag."""

    class Meta:
        model = Tag
        fields = ["id", "tag", "description", "color"]


class SubmissionOrgaSerializer(SubmissionSerializer):
    """Organiser-level submission serializer: adds answers, tags, notes and
    timestamps on top of the public fields."""

    answers = SerializerMethodField()
    tags = SerializerMethodField()
    tag_ids = SerializerMethodField()
    created = SerializerMethodField()
    speaker_serializer_class = SubmitterOrgaSerializer

    def answers_queryset(self, obj):
        # overridden in the reviewer serializer to restrict visibility
        return obj.answers.all()

    def get_answers(self, obj):
        if not self.questions:
            return []
        queryset = self.answers_queryset(obj)
        # "all" (in either accepted spelling) means no question filtering
        if self.questions not in ["all", ["all"]]:
            queryset = queryset.filter(question__in=self.questions)
        return AnswerSerializer(queryset, many=True).data

    def get_created(self, obj):
        # render creation time in the event's timezone
        return obj.created.astimezone(obj.event.tz).isoformat()

    def METHOD_NAME(self, obj):
        return list(obj.tags.all().values_list("tag", flat=True))

    def get_tag_ids(self, obj):
        return list(obj.tags.all().values_list("id", flat=True))

    class Meta(SubmissionSerializer.Meta):
        fields = SubmissionSerializer.Meta.fields + [
            "created",
            "pending_state",
            "answers",
            "notes",
            "internal_notes",
            "tags",
            "tag_ids",
        ]


class SubmissionReviewerSerializer(SubmissionOrgaSerializer):
    """Orga serializer variant that only exposes reviewer-visible answers."""

    def answers_queryset(self, obj):
        return obj.reviewer_answers.all()

    class Meta(SubmissionOrgaSerializer.Meta):
        pass


class ScheduleListSerializer(ModelSerializer):
    """Lightweight schedule listing: version label and publish time."""

    version = SerializerMethodField()

    @staticmethod
    def get_version(obj):
        # the unreleased schedule has no version; label it "wip"
        return obj.version or "wip"

    class Meta:
        model = Schedule
        fields = ("version", "published")


class ScheduleSerializer(ModelSerializer):
    """Full schedule: confirmed talk slots plus breaks."""

    slots = SubmissionSerializer(
        Submission.objects.none().filter(state=SubmissionStates.CONFIRMED), many=True
    )
    breaks = SerializerMethodField()

    @staticmethod
    def get_breaks(obj):
        return BreakSerializer(obj.breaks, many=True).data

    class Meta:
        model = Schedule
        fields = ("slots", "version", "breaks")
5,650
is blacklisted tool
#!/usr/bin/env python3

"""Regenerates aqt's combinations metadata by querying download.qt.io,
compares it with the checked-in file, and optionally rewrites the file.

NOTE: `logger` and `tqdm` are module globals assigned only in the
__main__ block below; the functions here rely on them, so this module is
intended to be run as a script, not imported.
"""

import argparse
import json
import logging
import sys
from pathlib import Path
from typing import Dict, Generator, Iterable, List, Optional, Tuple, Union, NamedTuple

from jsoncomparison import NO_DIFF, Compare

from aqt.exceptions import ArchiveConnectionError, ArchiveDownloadError
from aqt.helper import Settings, setup_logging
from aqt.metadata import ArchiveId, MetadataFactory, Versions


def METHOD_NAME(tool_name: str) -> bool:
    """True for tool names that must be excluded from the generated data."""
    for prefix in ("tools_qt3dstudio_",):
        if tool_name.startswith(prefix):
            return True
    for suffix in ("_preview", "_early_access"):
        if tool_name.endswith(suffix):
            return True
    return False


def iter_archive_ids(
    *,
    category: str,
    hosts: Iterable[str] = ArchiveId.HOSTS,
    targets: Optional[Iterable[str]] = None,
) -> Generator[ArchiveId, None, None]:
    """Yield an ArchiveId per host/target combination for *category*.

    When *targets* is None, each host's own valid target list is used.
    """
    for host in sorted(hosts):
        use_targets = targets
        if use_targets is None:
            use_targets = ArchiveId.TARGETS_FOR_HOST[host]
        for target in use_targets:
            yield ArchiveId(category, host, target)


def iter_arches() -> Generator[dict, None, None]:
    """Yield {os_name, target, arch} dicts for a sample of Qt versions."""
    logger.info("Fetching arches")
    archive_ids = list(iter_archive_ids(category="qt"))
    for archive_id in tqdm(archive_ids):
        # sample several major/minor versions to cover arch variations
        for version in ("latest", "5.15.2", "5.13.2", "5.9.9"):
            for arch_name in MetadataFactory(archive_id, architectures_ver=version).getList():
                yield {
                    "os_name": archive_id.host,
                    "target": archive_id.target,
                    "arch": arch_name,
                }


def iter_tool_variants() -> Generator[dict, None, None]:
    """Yield {os_name, target, tool_name, arch} for every non-blacklisted
    tool variant on every host/target."""
    for archive_id in iter_archive_ids(category="tools"):
        logger.info("Fetching tool variants for {}".format(archive_id))
        for tool_name in tqdm(sorted(MetadataFactory(archive_id).fetch_tools())):
            if METHOD_NAME(tool_name):
                continue
            for tool_variant in MetadataFactory(
                archive_id, tool_name=tool_name
            ).getList():
                yield {
                    "os_name": archive_id.host,
                    "target": archive_id.target,
                    "tool_name": tool_name,
                    "arch": tool_variant,
                }


def iter_qt_minor_groups(
    host: str = "linux", target: str = "desktop"
) -> Generator[Tuple[int, int], None, None]:
    """Yield each available (major, minor) Qt version pair."""
    versions: Versions = MetadataFactory(ArchiveId("qt", host, target)).fetch_versions()
    for minor_group in versions:
        # all entries in a minor group share major/minor; use the first
        v = minor_group[0]
        yield v.major, v.minor


def iter_modules_for_qt_minor_groups(
    host: str = "linux", target: str = "desktop", arch: str = "gcc_64"
) -> Generator[Dict, None, None]:
    """Yield {qt_version, modules} for each minor version group, using the
    ".0" patch release of the group as the query version."""
    logger.info("Fetching qt modules for {}/{}".format(host, target))
    for major, minor in tqdm(list(iter_qt_minor_groups(host, target))):
        yield {
            "qt_version": f"{major}.{minor}",
            "modules": MetadataFactory(
                ArchiveId("qt", host, target),
                modules_query=MetadataFactory.ModulesQuery(f"{major}.{minor}.0", arch)
            ).getList(),
        }


def list_qt_versions(host: str = "linux", target: str = "desktop") -> List[str]:
    """Return every available Qt version as a flat list of strings."""
    all_versions = list()
    versions: Versions = MetadataFactory(ArchiveId("qt", host, target)).getList()
    for minor_group in versions:
        all_versions.extend([str(ver) for ver in minor_group])
    return all_versions


def merge_records(arch_records) -> List[Dict]:
    """De-duplicate arch records on (os_name, target, arch) and return them
    sorted by os_name, then target, then arch."""
    all_records: List[Dict] = []
    hashes = set()
    for record in arch_records:
        _hash = record["os_name"], record["target"], record["arch"]
        if _hash not in hashes:
            all_records.append(record)
            hashes.add(_hash)
    # sort by least-significant key first so earlier sorts are preserved
    for sorting_key in ("arch", "target", "os_name"):
        all_records = sorted(all_records, key=lambda d: d[sorting_key])
    return all_records


def generate_combos(new_archive: List[str]):
    """Assemble the full combinations document from live metadata; the
    "new_archive" section is carried over from the existing file."""
    return {
        "qt": merge_records(iter_arches()),
        "tools": list(iter_tool_variants()),
        "modules": list(iter_modules_for_qt_minor_groups()),
        "versions": list_qt_versions(),
        "new_archive": new_archive,
    }


def alphabetize_modules(combos: Dict[str, Union[List[Dict], List[str]]]):
    """Sort each module list in place so comparisons are order-insensitive."""
    for i, item in enumerate(combos["modules"]):
        combos["modules"][i]["modules"] = sorted(item["modules"])


def write_combinations_json(
    combos: List[Dict[str, Union[List[Dict], List[str]]]],
    filename: Path,
):
    """Serialize *combos* to *filename*; raises if nothing was written."""
    logger.info(f"Write file (unknown)")
    json_text = json.dumps(combos, sort_keys=True, indent=2)
    if filename.write_text(json_text, encoding="utf_8") == 0:
        raise RuntimeError("Failed to write file!")


def main(filename: Path, is_write_file: bool, is_verbose: bool) -> int:
    """Compare freshly generated data with *filename*.

    Returns 0 when up to date (or after a successful rewrite with
    *is_write_file*), 1 when a difference was found but not written or a
    download error occurred.
    """
    try:
        expect = json.loads(filename.read_text())
        # normalize module order on both sides before diffing
        alphabetize_modules(expect[0])
        actual = [generate_combos(new_archive=expect[0]["new_archive"])]
        diff = Compare().check(expect, actual)
        if is_verbose:
            logger.info("=" * 80)
            logger.info("Program Output:")
            logger.info(json.dumps(actual, sort_keys=True, indent=2))
            logger.info("=" * 80)
            logger.info(f"Comparison with existing '(unknown)':")
            logger.info(json.dumps(diff, sort_keys=True, indent=2))
            logger.info("=" * 80)
        if diff == NO_DIFF:
            logger.info(f"(unknown) is up to date! No PR is necessary this time!")
            return 0  # no difference
        if is_write_file:
            logger.info(f"(unknown) has changed; writing changes to file...")
            write_combinations_json(actual, filename)
            return 0  # File written successfully
        logger.warning(f"(unknown) is out of date, but no changes were written")
        return 1  # difference reported
    except (ArchiveConnectionError, ArchiveDownloadError) as e:
        logger.error(format(e))
        return 1


def get_tqdm(disable: bool):
    """Return tqdm (or a pass-through shim when *disable* is set); importing
    lazily keeps tqdm optional for CI runs with --no-tqdm."""
    if disable:
        return lambda x: x

    from tqdm import tqdm as base_tqdm

    return lambda *a: base_tqdm(*a, disable=disable)


if __name__ == "__main__":
    Settings.load_settings()
    setup_logging()
    # module globals used by the functions above
    logger = logging.getLogger("aqt.generate_combos")
    json_filename = Path(__file__).parent.parent / "aqt/combinations.json"

    parser = argparse.ArgumentParser(
        description="Generate combinations.json from download.qt.io, "
        "compare with existing file, and write file to correct differences"
    )
    parser.add_argument(
        "--write",
        help="write to combinations.json if changes detected",
        action="store_true",
    )
    parser.add_argument(
        "--no-tqdm",
        help="disable progress bars (makes CI logs easier to read)",
        action="store_true",
    )
    parser.add_argument(
        "--verbose",
        help="Print a json dump of the new file, and an abbreviated diff with the old file",
        action="store_true",
    )
    args = parser.parse_args()

    tqdm = get_tqdm(args.no_tqdm)

    sys.exit(
        main(filename=json_filename, is_write_file=args.write, is_verbose=args.verbose)
    )
5,651
test basic system connection
#!/usr/bin/python3 -cimport os, sys; os.execv(os.path.dirname(sys.argv[1]) + "/common/pywrap", sys.argv)

# This file is part of Cockpit.
#
# Copyright (C) 2021 Red Hat, Inc.
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.

from machineslib import VirtualMachinesCase
from testlib import nondestructive, skipImage, test_main


@nondestructive
class TestMachinesFilesystems(VirtualMachinesCase):
    """Browser tests for the virtiofs shared-filesystem UI of the Machines page."""

    def METHOD_NAME(self):
        """Exercise the filesystem dialog against the qemu:///system connection."""
        self._testBasic()

    def testBasicSessionConnection(self):
        """Session connection: the UI must not offer adding filesystems at all."""
        self._testBasic("session")

    def _testBasic(self, connection="system"):
        """Shared scenario: add shared directories, validate the form, and check
        that the resulting libvirt domain XML matches what the dialog configured.

        :param connection: libvirt connection name, "system" or "session"
        """
        b = self.browser
        m = self.machine

        # Directories that will be exported to the guest via virtiofs.
        m.execute("mkdir -p /tmp/dir1 /tmp/dir2")

        self.login_and_go("/machines")
        self.waitPageInit()
        self.createVm("subVmTest1", running=False, connection=connection)
        self.goToVmPage("subVmTest1", connectionName=connection)
        # wait until page initialized
        b.wait_visible("#vm-subVmTest1-hostdevs")

        # Session VMs cannot use virtiofs here, so the "add" button must be absent.
        if connection == "session":
            b.wait_not_present("#vm-subVmTest1-filesystems-add")
            return

        # Form validation: submitting an empty dialog must show helper texts
        # for both the source path and the mount tag.
        b.click("#vm-subVmTest1-filesystems-add")
        b.click("#vm-subVmTest1-filesystems-modal-add")
        b.wait_visible("#vm-subVmTest1-filesystems-modal-source-helper")
        b.wait_visible("#vm-subVmTest1-filesystems-modal-mountTag-helper")
        b.click("#vm-subVmTest1-filesystems-modal-cancel")

        # Add a new shared filesystem (with xattr support enabled).
        b.click("#vm-subVmTest1-filesystems-add")
        b.set_file_autocomplete_val("#vm-subVmTest1-filesystems-modal-source-group", "/tmp/dir1/")
        b.set_input_text("#vm-subVmTest1-filesystems-modal-mountTag", "dir1")
        b.click(".pf-v5-c-expandable-section__toggle")
        b.wait_visible("#vm-subVmTest1-filesystems-modal-xattr:not(:checked)")
        b.set_checked("#vm-subVmTest1-filesystems-modal-xattr", True)
        b.click("#vm-subVmTest1-filesystems-modal-add")
        b.wait_not_present("#vm-subVmTest1-filesystems-modal-add")

        b.wait_visible("tr[data-row-id='vm-subVmTest1-filesystem-/tmp/dir1/-dir1']")
        b.wait_in_text("tr[data-row-id='vm-subVmTest1-filesystem-/tmp/dir1/-dir1'] td[data-label='Source path']",
                       "/tmp/dir1/")
        b.wait_in_text("tr[data-row-id='vm-subVmTest1-filesystem-/tmp/dir1/-dir1'] td[data-label='Mount tag']",
                       "dir1")

        # Verify the generated domain XML directly via virsh + xmllint.
        domain_xml = "virsh -c qemu:///system dumpxml subVmTest1"
        xmllint_element = f"{domain_xml} | xmllint --xpath 'string(//domain/{{prop}})' - 2>&1 || true"

        self.assertEqual('/tmp/dir1/', self.machine.execute(xmllint_element.format(prop='devices/filesystem/source/@dir')).strip())
        self.assertEqual('dir1', self.machine.execute(xmllint_element.format(prop='devices/filesystem/target/@dir')).strip())
        self.assertEqual('on', self.machine.execute(xmllint_element.format(prop='devices/filesystem/binary/@xattr')).strip())
        # virtiofs requires shared memory backing; the dialog configures it on first use.
        self.assertEqual('shared', self.machine.execute(xmllint_element.format(prop='memoryBacking/access/@mode')).strip())

        # Add a new shared filesystem - now the memoryBacking is configured and hidden from the dialog
        b.click("#vm-subVmTest1-filesystems-add")
        b.set_file_autocomplete_val("#vm-subVmTest1-filesystems-modal-source-group", "/tmp/dir2/")
        b.set_input_text("#vm-subVmTest1-filesystems-modal-mountTag", "dir2")
        b.click(".pf-v5-c-expandable-section__toggle")
        b.set_checked("#vm-subVmTest1-filesystems-modal-xattr", False)
        b.click("#vm-subVmTest1-filesystems-modal-add")
        b.wait_not_present("#vm-subVmTest1-filesystems-modal-add")

        b.wait_visible("tr[data-row-id='vm-subVmTest1-filesystem-/tmp/dir2/-dir2']")
        b.wait_in_text("tr[data-row-id='vm-subVmTest1-filesystem-/tmp/dir2/-dir2'] td[data-label='Source path']",
                       "/tmp/dir2/")
        b.wait_in_text("tr[data-row-id='vm-subVmTest1-filesystem-/tmp/dir2/-dir2'] td[data-label='Mount tag']",
                       "dir2")

        self.assertEqual('/tmp/dir2/', self.machine.execute(xmllint_element.format(prop='devices/filesystem[2]/source/@dir')).strip())
        self.assertEqual('dir2', self.machine.execute(xmllint_element.format(prop='devices/filesystem[2]/target/@dir')).strip())
        # xattr was unchecked, so no attribute should be present on the second filesystem.
        self.assertEqual('', self.machine.execute(xmllint_element.format(prop='devices/filesystem[2]/binary/@xattr')).strip())

        # Try to add a new shared filesystem with the same mount tag;
        # libvirt rejects duplicate targets and the error must surface in an alert.
        b.click("#vm-subVmTest1-filesystems-add")
        b.set_file_autocomplete_val("#vm-subVmTest1-filesystems-modal-source-group", "/tmp/dir1/")
        b.set_input_text("#vm-subVmTest1-filesystems-modal-mountTag", "dir1")
        b.click("#vm-subVmTest1-filesystems-modal-add")
        b.wait_in_text(".pf-v5-c-alert", "Failed to add shared directory")
        b.wait_in_text(".pf-v5-c-alert", "filesystem target 'dir1' specified twice")
        b.click("#vm-subVmTest1-filesystems-modal-cancel")

        # Try to add a new shared filesystem with non existing source directory
        b.click("#vm-subVmTest1-filesystems-add")
        b.set_input_text("#vm-subVmTest1-filesystems-modal-source-group input", "dir3")
        b.wait_in_text("#vm-subVmTest1-filesystems-modal-source", "No such file or directory")
        b.click("#vm-subVmTest1-filesystems-modal-cancel")

        # Start VM and ensure that adding filesystem is disabled
        # (filesystems can only be attached while the VM is shut off).
        b.click("#vm-subVmTest1-system-run")
        b.wait_visible("#vm-subVmTest1-filesystems-add[aria-disabled=true]")

    @skipImage("Older libvirt does not support virtiofs", "rhel-8-6", "centos-8-stream")
    def testDelete(self):
        """Remove an existing shared filesystem through the confirmation dialog."""
        b = self.browser
        m = self.machine

        m.execute("mkdir -p /tmp/dir1")

        self.createVm("subVmTest1", running=False)
        self.login_and_go("/machines")
        self.waitPageInit()
        self.goToVmPage("subVmTest1")

        # Attach the filesystem out-of-band so the test covers only removal.
        m.execute("virt-xml subVmTest1 --add-device --filesystem source=/tmp/dir1/,target=dir1")
        b.wait_visible("tr[data-row-id='vm-subVmTest1-filesystem-/tmp/dir1/-dir1']")

        b.click("tr[data-row-id='vm-subVmTest1-filesystem-/tmp/dir1/-dir1'] button:contains(Remove)")
        # The confirmation dialog must identify exactly what will be removed.
        b.wait_in_text(".pf-v5-c-modal-box__body .pf-v5-c-description-list", "removed from subVmTest1")
        b.wait_in_text("#delete-resource-modal-source-path", "/tmp/dir1/")
        b.wait_in_text("#delete-resource-modal-mount-tag", "dir1")
        b.click("#delete-resource-modal button:contains(Remove)")

        b.wait_not_present("tr[data-row-id='vm-subVmTest1-filesystem-/tmp/dir1/-dir1']")


if __name__ == '__main__':
    test_main()
5,652
prepare request
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# NOTE(review): this module is AutoRest-generated; comments below are for
# readers only and will be lost on regeneration.
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse

from azure.core.exceptions import (
    ClientAuthenticationError,
    HttpResponseError,
    ResourceExistsError,
    ResourceNotFoundError,
    ResourceNotModifiedError,
    map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat

from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request

T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False


def build_list_request(**kwargs: Any) -> HttpRequest:
    """Build the GET request for listing Storage REST API operations."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2018-11-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop("template_url", "/providers/Microsoft.Storage/operations")

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.storage.v2018_11_01.StorageManagementClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Positional/keyword hybrid wiring used by the generated client factory.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")

    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
        """Lists all of the available Storage Rest API operations.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Operation or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2018_11_01.models.Operation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2018-11-01"))
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def METHOD_NAME(next_link=None):
            """Build the HTTP request for the first page or a continuation page."""
            if not next_link:

                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            """Deserialize one page; returns (continuation token, item iterator)."""
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            # NOTE(review): no nextLink is returned here, so the pager treats
            # the result as a single page — presumably this API never paginates.
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            """Run the pipeline for one page and raise on non-200 responses."""
            request = METHOD_NAME(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list.metadata = {"url": "/providers/Microsoft.Storage/operations"}
5,653
verify registration response
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Thin wrappers around py_webauthn for credential registration and assertion."""

import base64
import json

import webauthn as pywebauthn
from webauthn.helpers import base64url_to_bytes, generate_challenge
from webauthn.helpers.exceptions import (
    InvalidAuthenticationResponse,
    InvalidAuthenticatorDataStructure,
    InvalidRegistrationResponse,
    UnsupportedPublicKeyType,
)
from webauthn.helpers.options_to_json import options_to_json
from webauthn.helpers.structs import (
    AttestationConveyancePreference,
    AuthenticationCredential,
    AuthenticatorSelectionCriteria,
    PublicKeyCredentialDescriptor,
    RegistrationCredential,
    UserVerificationRequirement,
)


class AuthenticationRejectedError(Exception):
    """Raised when a WebAuthn assertion fails verification."""


class RegistrationRejectedError(Exception):
    """Raised when a WebAuthn registration (attestation) fails verification."""


def _get_webauthn_user_public_key_credential_descriptors(user, *, rp_id):
    """
    Return a list of PublicKeyCredentialDescriptor objects, one per WebAuthn
    credential registered for the given user model, suitable for passing as
    ``allow_credentials``/``exclude_credentials`` to the webauthn API.

    ``rp_id`` is accepted for signature symmetry with the other helpers but is
    not used here.
    """
    return [
        PublicKeyCredentialDescriptor(id=base64url_to_bytes(credential.credential_id))
        for credential in user.webauthn
    ]


def _get_webauthn_user_public_keys(user, *, rp_id):
    """Return ``(public_key_bytes, sign_count)`` pairs for the user's credentials."""
    return [
        (
            base64url_to_bytes(credential.public_key),
            credential.sign_count,
        )
        for credential in user.webauthn
    ]


def _webauthn_b64encode(source):
    """URL-safe base64 encode *source* with the trailing ``=`` padding stripped,
    matching the base64url form used inside WebAuthn clientData."""
    return base64.urlsafe_b64encode(source).rstrip(b"=")


def generate_webauthn_challenge():
    """
    Returns a random challenge suitable for use within
    Webauthn's credential and configuration option objects.

    See: https://w3c.github.io/webauthn/#cryptographic-challenges
    """
    return generate_challenge()


def get_credential_options(user, *, challenge, rp_name, rp_id):
    """
    Returns a dictionary of options for credential creation
    on the client side.
    """
    _authenticator_selection = AuthenticatorSelectionCriteria()
    _authenticator_selection.user_verification = UserVerificationRequirement.DISCOURAGED
    options = pywebauthn.generate_registration_options(
        rp_id=rp_id,
        rp_name=rp_name,
        user_id=str(user.id),
        user_name=user.username,
        user_display_name=user.name or user.username,
        challenge=challenge,
        attestation=AttestationConveyancePreference.NONE,
        authenticator_selection=_authenticator_selection,
    )
    return json.loads(options_to_json(options))


def get_assertion_options(user, *, challenge, rp_id):
    """
    Returns a dictionary of options for assertion retrieval
    on the client side.
    """
    options = pywebauthn.generate_authentication_options(
        rp_id=rp_id,
        challenge=challenge,
        allow_credentials=_get_webauthn_user_public_key_credential_descriptors(
            user, rp_id=rp_id
        ),
        user_verification=UserVerificationRequirement.DISCOURAGED,
    )
    return json.loads(options_to_json(options))


def METHOD_NAME(response, challenge, *, rp_id, origin):
    """
    Validates the challenge and attestation information
    sent from the client during device registration.

    Returns a WebAuthnCredential on success.
    Raises RegistrationRejectedError on failure.
    """
    # NOTE: We re-encode the challenge below, because our
    # response's clientData.challenge is encoded twice:
    # first for the entire clientData payload, and then again
    # for the individual challenge.
    encoded_challenge = _webauthn_b64encode(challenge)
    try:
        _credential = RegistrationCredential.parse_raw(response)
        return pywebauthn.METHOD_NAME(
            credential=_credential,
            expected_challenge=encoded_challenge,
            expected_rp_id=rp_id,
            expected_origin=origin,
            require_user_verification=False,
        )
    except (
        InvalidAuthenticatorDataStructure,
        InvalidRegistrationResponse,
        UnsupportedPublicKeyType,
    ) as e:
        # Chain the underlying library error so the original failure reason
        # stays visible in tracebacks (PEP 3134).
        raise RegistrationRejectedError(str(e)) from e


def verify_assertion_response(assertion, *, challenge, user, origin, rp_id):
    """
    Validates the challenge and assertion information
    sent from the client during authentication.

    Returns an updated sign count on success.
    Raises AuthenticationRejectedError on failure.
    """
    # NOTE: We re-encode the challenge below, because our
    # response's clientData.challenge is encoded twice:
    # first for the entire clientData payload, and then again
    # for the individual challenge.
    encoded_challenge = _webauthn_b64encode(challenge)
    webauthn_user_public_keys = _get_webauthn_user_public_keys(user, rp_id=rp_id)

    # Try every credential the user has registered; any one verifying is enough.
    for public_key, current_sign_count in webauthn_user_public_keys:
        try:
            _credential = AuthenticationCredential.parse_raw(assertion)
            return pywebauthn.verify_authentication_response(
                credential=_credential,
                expected_challenge=encoded_challenge,
                expected_rp_id=rp_id,
                expected_origin=origin,
                credential_public_key=public_key,
                credential_current_sign_count=current_sign_count,
                require_user_verification=False,
            )
        except InvalidAuthenticationResponse:
            pass

    # If we exit the loop, then we've failed to verify the assertion against
    # any of the user's WebAuthn credentials. Fail.
    raise AuthenticationRejectedError("Invalid WebAuthn credential")
5,654
test msmarco passage tct colbert v2 encoded
#
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Integration tests for TCT-ColBERTv2 models using on-the-fly query encoding."""

import os
import socket
import unittest

from integrations.utils import clean_files, run_command, parse_score
from pyserini.search import QueryEncoder
from pyserini.search import get_topics


class TestTctColBertV2(unittest.TestCase):
    """End-to-end retrieval runs; each test shells out to pyserini CLIs and
    checks MRR@10 against the published reproduction score."""

    def setUp(self):
        # Files created by a test are registered here and removed in tearDown.
        self.temp_files = []
        self.threads = 16
        self.batch_size = 256

        # Hard-code larger values for internal servers
        if socket.gethostname().startswith('damiano') or socket.gethostname().startswith('orca'):
            self.threads = 36
            self.batch_size = 144

    def test_msmarco_passage_tct_colbert_v2_bf_otf(self):
        """Dense retrieval with the base TCT-ColBERTv2 encoder (brute-force index)."""
        output_file = 'test_run.msmarco-passage.tct_colbert-v2.bf-otf.tsv'
        self.temp_files.append(output_file)
        cmd1 = f'python -m pyserini.search.faiss --topics msmarco-passage-dev-subset \
                             --index msmarco-v1-passage.tct_colbert-v2 \
                             --encoder castorini/tct_colbert-v2-msmarco \
                             --batch-size {self.batch_size} \
                             --threads {self.threads} \
                             --output {output_file} \
                             --output-format msmarco'
        cmd2 = f'python -m pyserini.eval.msmarco_passage_eval msmarco-passage-dev-subset {output_file}'
        status = os.system(cmd1)
        stdout, stderr = run_command(cmd2)
        score = parse_score(stdout, "MRR @10")
        self.assertEqual(status, 0)
        self.assertAlmostEqual(score, 0.3440, delta=0.0001)

    def test_msmarco_passage_tct_colbert_v2_hn_otf(self):
        """Dense retrieval with the hard-negative (HN) trained encoder."""
        output_file = 'test_run.msmarco-passage.tct_colbert-v2-hn.bf-otf.tsv'
        self.temp_files.append(output_file)
        cmd1 = f'python -m pyserini.search.faiss --topics msmarco-passage-dev-subset \
                             --index msmarco-v1-passage.tct_colbert-v2-hn \
                             --encoder castorini/tct_colbert-v2-hn-msmarco \
                             --batch-size {self.batch_size} \
                             --threads {self.threads} \
                             --output {output_file} \
                             --output-format msmarco'
        cmd2 = f'python -m pyserini.eval.msmarco_passage_eval msmarco-passage-dev-subset {output_file}'
        status = os.system(cmd1)
        stdout, stderr = run_command(cmd2)
        score = parse_score(stdout, "MRR @10")
        self.assertEqual(status, 0)
        self.assertAlmostEqual(score, 0.3543, delta=0.0001)

    def test_msmarco_passage_tct_colbert_v2_hnp_bf_bm25_hybrid_otf(self):
        """Dense/sparse hybrid: HNP encoder fused with BM25 (alpha=0.06)."""
        output_file = 'test_run.msmarco-passage.tct_colbert-v2-hnp.bf-otf.bm25.tsv'
        self.temp_files.append(output_file)
        cmd1 = f'python -m pyserini.search.hybrid   dense  --index msmarco-v1-passage.tct_colbert-v2-hnp \
                                    --encoder castorini/tct_colbert-v2-hnp-msmarco \
                             sparse --index msmarco-v1-passage \
                             fusion --alpha 0.06 \
                             run    --topics msmarco-passage-dev-subset \
                                    --output {output_file} \
                                    --batch-size {self.batch_size} --threads {self.threads} \
                                    --output-format msmarco'
        cmd2 = f'python -m pyserini.eval.msmarco_passage_eval msmarco-passage-dev-subset {output_file}'
        status = os.system(cmd1)
        stdout, stderr = run_command(cmd2)
        score = parse_score(stdout, "MRR @10")
        self.assertEqual(status, 0)
        self.assertAlmostEqual(score, 0.3682, delta=0.0001)

    def test_msmarco_passage_tct_colbert_v2_hnp_bf_d2q_hybrid_otf(self):
        """Dense/sparse hybrid: HNP encoder fused with doc2query-T5 (alpha=0.1)."""
        output_file = 'test_run.msmarco-passage.tct_colbert-v2-hnp.bf-otf.doc2queryT5.tsv'
        self.temp_files.append(output_file)
        cmd1 = f'python -m pyserini.search.hybrid   dense  --index msmarco-v1-passage.tct_colbert-v2-hnp \
                                    --encoder castorini/tct_colbert-v2-hnp-msmarco \
                             sparse --index msmarco-v1-passage-d2q-t5 \
                             fusion --alpha 0.1 \
                             run    --topics msmarco-passage-dev-subset \
                                    --output {output_file} \
                                    --batch-size {self.batch_size} --threads {self.threads} \
                                    --output-format msmarco'
        cmd2 = f'python -m pyserini.eval.msmarco_passage_eval msmarco-passage-dev-subset {output_file}'
        status = os.system(cmd1)
        stdout, stderr = run_command(cmd2)
        score = parse_score(stdout, "MRR @10")
        self.assertEqual(status, 0)
        self.assertAlmostEqual(score, 0.3731, delta=0.0001)

    def METHOD_NAME(self):
        """Pre-encoded queries for the base encoder cover every dev-subset topic."""
        encoded = QueryEncoder.load_encoded_queries('tct_colbert-v2-msmarco-passage-dev-subset')
        topics = get_topics('msmarco-passage-dev-subset')
        for t in topics:
            self.assertTrue(topics[t]['title'] in encoded.embedding)

    def test_msmarco_passage_tct_colbert_v2_hn_encoded_queries(self):
        """Pre-encoded queries for the HN encoder cover every dev-subset topic."""
        encoded = QueryEncoder.load_encoded_queries('tct_colbert-v2-hn-msmarco-passage-dev-subset')
        topics = get_topics('msmarco-passage-dev-subset')
        for t in topics:
            self.assertTrue(topics[t]['title'] in encoded.embedding)

    def test_msmarco_passage_tct_colbert_v2_hnp_encoded_queries(self):
        """Pre-encoded queries for the HNP encoder cover every dev-subset topic."""
        encoded = QueryEncoder.load_encoded_queries('tct_colbert-v2-hnp-msmarco-passage-dev-subset')
        topics = get_topics('msmarco-passage-dev-subset')
        for t in topics:
            self.assertTrue(topics[t]['title'] in encoded.embedding)

    def tearDown(self):
        clean_files(self.temp_files)


if __name__ == '__main__':
    unittest.main()
5,655
test path isin case sensitive
"""Unit tests for dvc.utils.fs path and symlink helpers."""

import os
from pathlib import Path

import pytest

import dvc
from dvc.fs import system
from dvc.utils import relpath
from dvc.utils.fs import (
    BasePathNotInCheckedPathException,
    contains_symlink_up_to,
    path_isin,
    remove,
)


def test_should_raise_exception_on_base_path_not_in_path():
    with pytest.raises(BasePathNotInCheckedPathException):
        contains_symlink_up_to(os.path.join("foo", "path"), "bar")


def test_should_return_true_on_symlink_in_path(mocker):
    mocker.patch.object(system, "is_symlink", return_value=True)
    base_path = "foo"
    path = os.path.join(base_path, "bar")
    assert contains_symlink_up_to(path, base_path)


def test_should_return_false_on_path_eq_to_base_path(mocker):
    mocker.patch.object(system, "is_symlink", return_value=False)
    path = "path"
    assert not contains_symlink_up_to(path, path)


def test_should_return_false_on_no_more_dirs_below_path(mocker):
    mocker.patch.object(system, "is_symlink", return_value=False)
    # os.path.dirname returning its argument simulates hitting the filesystem root.
    dirname_patch = mocker.patch.object(os.path, "dirname", side_effect=lambda arg: arg)
    assert not contains_symlink_up_to(os.path.join("foo", "path"), "foo")
    dirname_patch.assert_called_once()


def test_should_return_false_when_base_path_is_symlink(mocker):
    base_path = "foo"
    target_path = os.path.join(base_path, "bar")

    # Report only the base path itself as a symlink. `side_effect` alone is
    # sufficient; the original also passed `return_value=True`, which mock
    # ignores whenever `side_effect` returns a value.
    mocker.patch.object(
        system, "is_symlink", side_effect=lambda path: path == base_path
    )

    assert not contains_symlink_up_to(target_path, base_path)


def test_path_object_and_str_are_valid_arg_types():
    """Both str and os.PathLike arguments must be accepted.

    Bug fix: the original called the function twice with identical str
    arguments and never actually exercised a path object.
    """
    base_path = "foo"
    target_path = os.path.join(base_path, "bar")
    assert not contains_symlink_up_to(target_path, base_path)
    assert not contains_symlink_up_to(Path(target_path), Path(base_path))


def test_should_call_recursive_on_no_condition_matched(mocker):
    mocker.patch.object(system, "is_symlink", return_value=False)

    contains_symlink_spy = mocker.spy(dvc.utils.fs, "contains_symlink_up_to")

    # call from full path to match contains_symlink_spy patch path
    assert not dvc.utils.fs.contains_symlink_up_to(os.path.join("foo", "path"), "foo")
    # one initial call plus one recursive call for the parent directory
    assert contains_symlink_spy.mock.call_count == 2


@pytest.mark.skipif(os.name != "nt", reason="Windows specific")
def test_relpath_windows_different_drives():
    path1 = os.path.join("A:", os.sep, "some", "path")
    path2 = os.path.join("B:", os.sep, "other", "path")

    # Paths on different drives share no common prefix; dvc's relpath must
    # fall back to returning the absolute path unchanged as a plain str
    # (plain os.path.relpath would raise ValueError here).
    rel = relpath(path1, path2)
    assert isinstance(rel, str)
    assert rel == path1


def test_remove(tmp_dir):
    tmp_dir.gen({"foo": "foo content"})
    path = "foo"
    remove(path)
    assert not os.path.isfile(path)


def test_path_isin_positive():
    child = os.path.join("path", "to", "folder")

    assert path_isin(child, os.path.join("path", "to", ""))
    assert path_isin(child, os.path.join("path", "to"))
    assert path_isin(child, os.path.join("path", ""))
    assert path_isin(child, os.path.join("path"))


def test_path_isin_on_same_path():
    path = os.path.join("path", "to", "folder")
    path_with_sep = os.path.join(path, "")

    # A path is never considered inside itself, regardless of trailing separators.
    assert not path_isin(path, path)
    assert not path_isin(path, path_with_sep)
    assert not path_isin(path_with_sep, path)
    assert not path_isin(path_with_sep, path_with_sep)


def test_path_isin_on_common_substring_path():
    # "folder1" starts with "folder" but is a sibling, not a child.
    path1 = os.path.join("path", "to", "folder1")
    path2 = os.path.join("path", "to", "folder")

    assert not path_isin(path1, path2)


def test_path_isin_with_absolute_path():
    parent = os.path.abspath("path")
    child = os.path.join(parent, "to", "folder")

    assert path_isin(child, parent)


def METHOD_NAME():
    child = os.path.join("path", "to", "folder")
    parent = os.path.join("PATH", "TO")

    # Case-insensitive containment only holds on Windows filesystems.
    assert path_isin(child, parent) == (os.name == "nt")


@pytest.mark.skipif(os.name != "nt", reason="Windows specific")
def test_contains_symlink_case_sensitive_win():
    child = os.path.join("path", "to", "folder")
    parent = os.path.join("PATH", "TO")
    assert contains_symlink_up_to(child, parent) is False


@pytest.mark.skipif(os.name == "nt", reason="Posix specific")
def test_contains_symlink_case_sensitive_posix():
    child = os.path.join("path", "to", "folder")
    parent = os.path.join("PATH", "TO")
    # On POSIX the differently-cased parent is simply not a prefix of child.
    with pytest.raises(BasePathNotInCheckedPathException):
        contains_symlink_up_to(child, parent)


def test_makedirs(tmp_dir):
    path = os.path.join(tmp_dir, "directory")
    os.makedirs(path)
    assert os.path.isdir(path)
5,656
handle result
#############################################################################
# Copyright (C) 2020-2021 German Aerospace Center (DLR-SC)
#
# Authors:
#
# Contact: Martin J. Kuehn <Martin.Kuehn@DLR.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
import argparse

import numpy as np

import memilio.simulation as mio
import memilio.simulation.secir as secir


def parameter_study():
    """Run an ensemble of SECIR graph simulations, sampling the transmission
    probability from a uniform distribution for each run."""
    # setup basic parameters
    num_groups = 6
    model = secir.Model(num_groups)

    # Identical epidemiological parameters and initial populations
    # for every age group.
    for i in range(num_groups):
        group = secir.AgeGroup(i)

        # Stage durations in days — assumed; confirm against SECIR model docs.
        model.parameters.IncubationTime[group] = 5.2
        model.parameters.TimeInfectedSymptoms[group] = 6
        model.parameters.SerialInterval[group] = 4.2
        model.parameters.TimeInfectedSevere[group] = 12
        model.parameters.TimeInfectedCritical[group] = 8

        # Initial compartment populations; Susceptible is filled up so each
        # group totals 10000.
        model.populations[group, secir.InfectionState.Exposed] = 100
        model.populations[group, secir.InfectionState.InfectedNoSymptoms] = 50
        model.populations[group, secir.InfectionState.InfectedSymptoms] = 20
        model.populations[group, secir.InfectionState.InfectedSevere] = 20
        model.populations[group, secir.InfectionState.InfectedCritical] = 10
        model.populations[group, secir.InfectionState.Recovered] = 50
        model.populations[group, secir.InfectionState.Dead] = 10
        model.populations.set_difference_from_group_total_AgeGroup(
            (group, secir.InfectionState.Susceptible), 10000)

        # This parameter is the one varied across ensemble runs: it is sampled
        # uniformly from [0.1, 0.2] for each run of the parameter study.
        model.parameters.TransmissionProbabilityOnContact[group].set_distribution(
            mio.ParameterDistributionUniform(0.1, 0.2))
        model.parameters.RecoveredPerInfectedNoSymptoms[group] = 0.09
        model.parameters.RiskOfInfectionFromSymptomatic[group] = 0.25
        model.parameters.SeverePerInfectedSymptoms[group] = 0.2
        model.parameters.CriticalPerSevere[group] = 0.25
        model.parameters.DeathsPerCritical[group] = 0.3

    # Uniform contact matrices (4 locations), with a damping of 0.7 applied
    # at day 30 to model a contact-reducing intervention.
    model.parameters.ContactPatterns.cont_freq_mat = mio.ContactMatrixGroup(
        4, num_groups)
    model.parameters.ContactPatterns.cont_freq_mat[0] = mio.ContactMatrix(
        np.ones((num_groups, num_groups))*0.5)
    model.parameters.ContactPatterns.cont_freq_mat[1] = mio.ContactMatrix(
        np.ones((num_groups, num_groups))*0.5)
    model.parameters.ContactPatterns.cont_freq_mat[2] = mio.ContactMatrix(
        np.ones((num_groups, num_groups))*0.5)
    model.parameters.ContactPatterns.cont_freq_mat[3] = mio.ContactMatrix(
        np.ones((num_groups, num_groups))*0.5)
    model.parameters.ContactPatterns.cont_freq_mat.add_damping(
        mio.Damping(np.ones((num_groups, num_groups))*0.7, 30.0))

    print(model.parameters.ContactPatterns.cont_freq_mat[1].baseline)

    # process the result of one run
    def METHOD_NAME(graph, run_idx):
        """Per-run callback: print the sampled infection rate and the first/last
        compartment values of node 0."""
        group = secir.AgeGroup(0)
        print("run {} with infection rate {:.2G}".format(METHOD_NAME.c, graph.get_node(
            0).property.model.parameters.TransmissionProbabilityOnContact[group].value))
        print("compartments at t = {}:".format(
            graph.get_node(0).property.result.get_time(0)))
        print(graph.get_node(0).property.result.get_value(0))
        print("compartments at t = {}:".format(
            graph.get_node(0).property.result.get_last_time()))
        print(graph.get_node(0).property.result.get_last_value())
        # Function attribute used as a run counter shared across callback calls.
        METHOD_NAME.c += 1

    METHOD_NAME.c = 0

    # study the effect of different infection rates
    model.apply_constraints()

    # Two-node graph with symmetric migration between the nodes.
    graph = secir.ModelGraph()
    graph.add_node(0, model)
    graph.add_node(1, model)
    graph.add_edge(0, 1, 0.01 * np.ones(model.populations.numel()*num_groups))
    graph.add_edge(1, 0, 0.01 * np.ones(model.populations.numel()*num_groups))

    study = secir.ParameterStudy(graph, t0=1, tmax=10, dt=0.5, num_runs=3)
    study.run(METHOD_NAME)


if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser(
        'parameter_studies',
        description='Example demonstrating ensemble runs of a SECIR model.')
    args = arg_parser.parse_args()
    parameter_study()
5,657
url
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------

# pylint: skip-file
# flake8: noqa

# NOTE(review): aaz-dev-tools generated command module; comments here are
# reader aids only and will be lost on regeneration.

from azure.cli.core.aaz import *


@register_command(
    "eventhubs eventhub authorization-rule list",
)
class List(AAZCommand):
    """List the authorization rules for an Event Hub.

    :example: shows the list of Authorization-rules by Eventhub
        az eventhubs eventhub authorization-rule list --resource-group myresourcegroup --namespace-name mynamespace --eventhub-name myeventhub
    """

    _aaz_info = {
        "version": "2023-01-01-preview",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.eventhub/namespaces/{}/eventhubs/{}/authorizationrules", "2023-01-01-preview"],
        ]
    }

    AZ_SUPPORT_PAGINATION = True

    def _handler(self, command_args):
        """Entry point: run the operations and return a paging wrapper."""
        super()._handler(command_args)
        return self.build_paging(self._execute_operations, self._output)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        """Define CLI arguments; the schema is cached on the class."""
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.eventhub_name = AAZStrArg(
            options=["--eventhub-name"],
            help="The Event Hub name",
            required=True,
            fmt=AAZStrArgFormat(
                max_length=256,
                min_length=1,
            ),
        )
        _args_schema.namespace_name = AAZStrArg(
            options=["--namespace-name"],
            help="The Namespace name",
            required=True,
            fmt=AAZStrArgFormat(
                max_length=50,
                min_length=6,
            ),
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.EventHubsListAuthorizationRules(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        """Deserialize the accumulated result and the pagination link."""
        result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
        next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
        return result, next_link

    class EventHubsListAuthorizationRules(AAZHttpOperation):
        """GET operation against the Event Hubs authorizationRules collection."""
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)

            return self.on_error(session.http_response)

        @property
        def METHOD_NAME(self):
            # Request URL template; placeholders are filled from url_parameters.
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "eventHubName", self.ctx.args.eventhub_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "namespaceName", self.ctx.args.namespace_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2023-01-01-preview",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            # Store the deserialized page on the context for _output to pick up.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            """Lazily build (and cache) the response schema for HTTP 200."""
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.next_link = AAZStrType(
                serialized_name="nextLink",
            )
            _schema_on_200.value = AAZListType()

            value = cls._schema_on_200.value
            value.Element = AAZObjectType()

            _element = cls._schema_on_200.value.Element
            _element.id = AAZStrType(
                flags={"read_only": True},
            )
            _element.location = AAZStrType(
                flags={"read_only": True},
            )
            _element.name = AAZStrType(
                flags={"read_only": True},
            )
            _element.properties = AAZObjectType(
                flags={"client_flatten": True},
            )
            _element.system_data = AAZObjectType(
                serialized_name="systemData",
                flags={"read_only": True},
            )
            _element.type = AAZStrType(
                flags={"read_only": True},
            )

            properties = cls._schema_on_200.value.Element.properties
            properties.rights = AAZListType(
                flags={"required": True},
            )

            rights = cls._schema_on_200.value.Element.properties.rights
            rights.Element = AAZStrType()

            system_data = cls._schema_on_200.value.Element.system_data
            system_data.created_at = AAZStrType(
                serialized_name="createdAt",
            )
            system_data.created_by = AAZStrType(
                serialized_name="createdBy",
            )
            system_data.created_by_type = AAZStrType(
                serialized_name="createdByType",
            )
            system_data.last_modified_at = AAZStrType(
                serialized_name="lastModifiedAt",
            )
            system_data.last_modified_by = AAZStrType(
                serialized_name="lastModifiedBy",
            )
            system_data.last_modified_by_type = AAZStrType(
                serialized_name="lastModifiedByType",
            )

            return cls._schema_on_200


class _ListHelper:
    """Helper class for List"""


__all__ = ["List"]
5,658
get options label
# tool_rotate.py # # Copyright 2018-2023 Romain F. T. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import math from gi.repository import Gdk from .abstract_transform_tool import AbstractCanvasTool from .optionsbar_rotate import OptionsBarRotate class ToolRotate(AbstractCanvasTool): __gtype_name__ = 'ToolRotate' def __init__(self, window): super().__init__('rotate', _("Rotate"), 'tool-rotate-symbolic', window) self.cursor_name = 'pointer' self.flip_h = False self.flip_v = False self.angle_press = 0 self.add_tool_action_simple('rotate-clockwise', self.on_right_clicked) self.add_tool_action_simple('rotate-counter-cw', self.on_left_clicked) self.add_tool_action_simple('rotate-flip-h', self.on_horizontal_clicked) self.add_tool_action_simple('rotate-flip-v', self.on_vertical_clicked) def try_build_pane(self): self.pane_id = 'rotate' self.window.options_manager.try_add_bottom_pane(self.pane_id, self) def build_bottom_pane(self): pane = OptionsBarRotate(self) self.angle_btn = pane.angle_btn self.angle_btn.connect('value-changed', self.on_angle_changed) return pane def METHOD_NAME(self): return _("Rotating options") def get_editing_tips(self): if self.apply_to_selection: label_action = _("Rotating the selection") label_confirm = None else: label_action = _("Rotating the canvas") label_confirm = self.label + " - " + \ _("Don't forget to confirm the operation!") # there is intentionally no 
`label_modifier_shift` because it would be # too hard to explain the actions of each mouse button. full_list = [label_action, label_confirm] return list(filter(None, full_list)) def on_tool_selected(self, *args): super().on_tool_selected() self.flip_h = False self.flip_v = False self.angle_btn.set_value(0.0) self.build_and_do_op() # Show the temp_pixbuf before any event if self.apply_to_selection: self.cursor_name = 'move' # not the ideal cursor, but their is no ideal cursor for this else: self.cursor_name = 'pointer' # the pane is updated by the window according to self.apply_to_selection ############################################################################ def on_press_on_area(self, event, surface, event_x, event_y): self.update_modifier_state(event.state) if 'SHIFT' in self._modifier_keys: if event.button == 1: self.on_horizontal_clicked() elif event.button == 3: self.on_vertical_clicked() return elif not self.apply_to_selection: if event.button == 1: self.on_left_clicked() elif event.button == 3: self.on_right_clicked() return center_x, center_y = self.get_selection().get_center_coords() delta_x0 = center_x - event_x delta_y0 = center_y - event_y press_as_degrees = (math.atan2(delta_x0, delta_y0) * 180) / math.pi self.angle_press = self.get_angle() - int(press_as_degrees) def on_motion_on_area(self, event, surface, event_x, event_y, render=True): if not self.apply_to_selection: return center_x, center_y = self.get_selection().get_center_coords() delta_x = center_x - event_x delta_y = center_y - event_y release_angle = ( math.atan2(delta_x, delta_y) * 180 ) / math.pi self.angle_btn.set_value(int(release_angle) + self.angle_press) if render: operation = self.build_operation() self.do_tool_operation(operation) def on_release_on_area(self, event, surface, event_x, event_y): self.on_motion_on_area(event, surface, event_x, event_y) ############################################################################ def get_angle(self): return 
self.angle_btn.get_value_as_int() def on_right_clicked(self, *args): angle = self.get_normalized_angle() self.angle_btn.set_value(angle - 90) def on_left_clicked(self, *args): angle = self.get_normalized_angle() self.angle_btn.set_value(angle + 90) def on_vertical_clicked(self, *args): self.flip_v = not self.flip_v self.build_and_do_op() def on_horizontal_clicked(self, *args): self.flip_h = not self.flip_h self.build_and_do_op() def get_normalized_angle(self, *args): angle = self.get_angle() % 360 angle = int(angle/90) * 90 return angle def on_angle_changed(self, *args): if self.get_angle() == 360 or self.get_angle() == -360: self.angle_btn.set_value(0) self.build_and_do_op() ############################################################################ def on_draw_above(self, area, cairo_context): x1 = 0 y1 = 0 x2 = x1 + self.get_image().temp_pixbuf.get_width() y2 = y1 + self.get_image().temp_pixbuf.get_height() x1, x2, y1, y2 = self.get_image().get_corrected_coords(x1, x2, y1, y2, \ self.apply_to_selection, False) self._draw_temp_pixbuf(cairo_context, x1, y1) ############################################################################ def build_operation(self): operation = { 'tool_id': self.id, 'is_selection': self.apply_to_selection, 'is_preview': True, 'local_dx': 0, 'local_dy': 0, 'angle': self.get_angle(), 'flip_h': self.flip_h, 'flip_v': self.flip_v } return operation def do_tool_operation(self, operation): self.start_tool_operation(operation) angle = operation['angle'] flip_h = operation['flip_h'] flip_v = operation['flip_v'] if operation['is_selection']: source_pixbuf = self.get_selection_pixbuf() else: source_pixbuf = self.get_main_pixbuf() if angle < 0: angle += 360 gdk_rotation = int(angle / 90) * 90 cairo_rotation = angle % 90 # print('angle:', angle) # print('gdk_rotation:', gdk_rotation) # print('cairo_rotation:', cairo_rotation) new_pixbuf = source_pixbuf # Image flipping (horizontal or vertical "mirroring") if flip_h: new_pixbuf = 
new_pixbuf.flip(True) if flip_v: new_pixbuf = new_pixbuf.flip(False) # Image rotation, using the method from GdkPixbuf.Pixbuf new_pixbuf = new_pixbuf.rotate_simple(gdk_rotation) # Image rotation, using methods from cairo.Context (only if needed) if cairo_rotation != 0: surface0 = Gdk.cairo_surface_create_from_pixbuf(new_pixbuf, 0, None) surface0.set_device_scale(self.scale_factor(), self.scale_factor()) coefs = self._get_rotation_matrix(cairo_rotation, \ surface0.get_width(), surface0.get_height()) new_surface = self.get_resized_surface(surface0, coefs) new_surface = self.get_deformed_surface(surface0, new_surface, coefs) new_pixbuf = Gdk.pixbuf_get_from_surface(new_surface, 0, 0, \ new_surface.get_width(), new_surface.get_height()) self.get_image().set_temp_pixbuf(new_pixbuf) self.common_end_operation(operation) def _get_rotation_matrix(self, angle, width, height): """Transform an angle (in degrees) to the xx/yx/xy/yy coefs expected by cairo. Due to previously performed modifications to the data, the angle will be between 0 (excluded) and 90 (excluded).""" rad = math.pi * angle / 180 xx = math.cos(rad) xy = math.sin(rad) yx = -1 * math.sin(rad) yy = math.cos(rad) x0 = max(0, height * yx) y0 = max(0, width * xy) return [xx, yx, xy, yy, x0, y0] ############################################################################ ################################################################################
5,659
test mean variance
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for PoissonLogNormalQuadratureCompoundTest.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.distributions.python.ops import poisson_lognormal from tensorflow.contrib.distributions.python.ops import test_util from tensorflow.python.ops import array_ops from tensorflow.python.platform import test class _PoissonLogNormalQuadratureCompoundTest( test_util.DiscreteScalarDistributionTestHelpers): """Tests the PoissonLogNormalQuadratureCompoundTest distribution.""" def testSampleProbConsistent(self): with self.cached_session() as sess: pln = poisson_lognormal.PoissonLogNormalQuadratureCompound( loc=array_ops.placeholder_with_default( -2., shape=[] if self.static_shape else None), scale=array_ops.placeholder_with_default( 1.1, shape=[] if self.static_shape else None), quadrature_size=10, validate_args=True) self.run_test_sample_consistent_log_prob( sess.run, pln, batch_size=1, rtol=0.1) def METHOD_NAME(self): with self.cached_session() as sess: pln = poisson_lognormal.PoissonLogNormalQuadratureCompound( loc=array_ops.placeholder_with_default( 0., shape=[] if self.static_shape else None), scale=array_ops.placeholder_with_default( 1., shape=[] if self.static_shape else None), quadrature_size=10, 
validate_args=True) self.run_test_sample_consistent_mean_variance( sess.run, pln, rtol=0.02) def testSampleProbConsistentBroadcastScalar(self): with self.cached_session() as sess: pln = poisson_lognormal.PoissonLogNormalQuadratureCompound( loc=array_ops.placeholder_with_default( [0., -0.5], shape=[2] if self.static_shape else None), scale=array_ops.placeholder_with_default( 1., shape=[] if self.static_shape else None), quadrature_size=10, validate_args=True) self.run_test_sample_consistent_log_prob( sess.run, pln, batch_size=2, rtol=0.1, atol=0.01) def testMeanVarianceBroadcastScalar(self): with self.cached_session() as sess: pln = poisson_lognormal.PoissonLogNormalQuadratureCompound( loc=array_ops.placeholder_with_default( [0., -0.5], shape=[2] if self.static_shape else None), scale=array_ops.placeholder_with_default( 1., shape=[] if self.static_shape else None), quadrature_size=10, validate_args=True) self.run_test_sample_consistent_mean_variance( sess.run, pln, rtol=0.1, atol=0.01) def testSampleProbConsistentBroadcastBoth(self): with self.cached_session() as sess: pln = poisson_lognormal.PoissonLogNormalQuadratureCompound( loc=array_ops.placeholder_with_default( [[0.], [-0.5]], shape=[2, 1] if self.static_shape else None), scale=array_ops.placeholder_with_default( [[1., 0.9]], shape=[1, 2] if self.static_shape else None), quadrature_size=10, validate_args=True) self.run_test_sample_consistent_log_prob( sess.run, pln, batch_size=4, rtol=0.1, atol=0.08) def testMeanVarianceBroadcastBoth(self): with self.cached_session() as sess: pln = poisson_lognormal.PoissonLogNormalQuadratureCompound( loc=array_ops.placeholder_with_default( [[0.], [-0.5]], shape=[2, 1] if self.static_shape else None), scale=array_ops.placeholder_with_default( [[1., 0.9]], shape=[1, 2] if self.static_shape else None), quadrature_size=10, validate_args=True) self.run_test_sample_consistent_mean_variance( sess.run, pln, rtol=0.1, atol=0.01) class PoissonLogNormalQuadratureCompoundStaticShapeTest( 
_PoissonLogNormalQuadratureCompoundTest, test.TestCase): @property def static_shape(self): return True class PoissonLogNormalQuadratureCompoundDynamicShapeTest( _PoissonLogNormalQuadratureCompoundTest, test.TestCase): @property def static_shape(self): return False if __name__ == "__main__": test.main()
5,660
assert equal hash
# Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. """Test Image widget""" import io import os from ipywidgets import Image import hashlib import pkgutil import tempfile from contextlib import contextmanager # Data @contextmanager def get_logo_png(): # Once the tests are not in the package, this context manager can be # replaced with the location of the actual file LOGO_DATA = pkgutil.get_data('ipywidgets.widgets.tests', 'data/jupyter-logo-transparent.png') handle, fname = tempfile.mkstemp() os.close(handle) with open(fname, 'wb') as f: f.write(LOGO_DATA) yield fname os.remove(fname) LOGO_PNG_DIGEST = '3ff9eafd7197083153e83339a72e7a335539bae189c33554c680e4382c98af02' def test_empty_image(): # Empty images shouldn't raise any errors Image() def test_image_value(): random_bytes = b'\x0ee\xca\x80\xcd\x9ak#\x7f\x07\x03\xa7' Image(value=random_bytes) def test_image_format(): # Test that these format names don't throw an error Image(format='png') Image(format='jpeg') Image(format='url') def test_from_filename(): with get_logo_png() as LOGO_PNG: img = Image.from_file(LOGO_PNG) METHOD_NAME(img.value, LOGO_PNG_DIGEST) def test_set_from_filename(): img = Image() with get_logo_png() as LOGO_PNG: img.set_value_from_file(LOGO_PNG) METHOD_NAME(img.value, LOGO_PNG_DIGEST) def test_from_file(): with get_logo_png() as LOGO_PNG: with open(LOGO_PNG, 'rb') as f: img = Image.from_file(f) METHOD_NAME(img.value, LOGO_PNG_DIGEST) def test_set_value_from_file(): img = Image() with get_logo_png() as LOGO_PNG: with open(LOGO_PNG, 'rb') as f: img.set_value_from_file(f) METHOD_NAME(img.value, LOGO_PNG_DIGEST) def test_from_url_unicode(): img = Image.from_url('https://jupyter.org/assets/main-logo.svg') assert img.value == b'https://jupyter.org/assets/main-logo.svg' def test_from_url_bytes(): img = Image.from_url(b'https://jupyter.org/assets/main-logo.svg') assert img.value == b'https://jupyter.org/assets/main-logo.svg' def 
test_format_inference_filename(): with tempfile.NamedTemporaryFile(suffix='.svg', delete=False) as f: name = f.name f.close() # Allow tests to run on Windows img = Image.from_file(name) assert img.format == 'svg+xml' def test_format_inference_file(): with tempfile.NamedTemporaryFile(suffix='.gif', delete=False) as f: img = Image.from_file(f) assert img.format == 'gif' def test_format_inference_stream(): # There's no way to infer the format, so it should default to png fstream = io.BytesIO(b'') img = Image.from_file(fstream) assert img.format == 'png' def test_serialize(): fstream = io.BytesIO(b'123') img = Image.from_file(fstream) img_state = img.get_state() # for python27 it is a memoryview assert isinstance(img_state['value'], (bytes, memoryview)) # make sure it is (for python 3), since that is what it will be once it comes off the wire img_state['value'] = memoryview(img_state['value']) # check that we can deserialize it and get back the original value img_copy = Image() img_copy.set_state(img_state) assert img.value == img_copy.value def test_format_inference_overridable(): with tempfile.NamedTemporaryFile(suffix='.svg', delete=False) as f: name = f.name f.close() # Allow tests to run on Windows img = Image.from_file(name, format='gif') assert img.format == 'gif' def test_value_repr_length(): with get_logo_png() as LOGO_PNG: with open(LOGO_PNG, 'rb') as f: img = Image.from_file(f) assert len(img.__repr__()) < 140 assert img.__repr__().endswith(")") assert img.__repr__()[-5:-2] == '...' def test_value_repr_url(): img = Image.from_url(b'https://jupyter.org/assets/main-logo.svg') assert 'https://jupyter.org/assets/main-logo.svg' in img.__repr__() # Helper functions def get_hash_hex(byte_str): m = hashlib.new('sha256') m.update(byte_str) return m.hexdigest() def METHOD_NAME(byte_str, digest): assert get_hash_hex(byte_str) == digest
5,661
mockingjay origin
# -*- coding: utf-8 -*- # """*********************************************************************************************""" # FileName [ upstream/mockingjay/hubconf.py ] # Synopsis [ the mockingjay torch hubconf ] # Author [ S3PRL ] # Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ] """*********************************************************************************************""" import os import torch from s3prl.util.download import _urls_to_filepaths from .expert import UpstreamExpert as _UpstreamExpert def mockingjay_local(ckpt, options_config=None, *args, **kwargs): """ The model from local ckpt ckpt (str): PATH feature_selection (int): -1 (default, the last layer) or an int in range(0, max_layer_num) """ assert os.path.isfile(ckpt) if options_config is not None: assert os.path.isfile(options_config) return _UpstreamExpert(ckpt, options_config, *args, **kwargs) def mockingjay_url(ckpt, refresh=False, *args, **kwargs): """ The model from URL ckpt (str): URL """ return mockingjay_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs) def mockingjay(refresh=False, *args, **kwargs): """ The default model refresh (bool): whether to download ckpt/config again if existed """ return METHOD_NAME(refresh=refresh, *args, **kwargs) ########### # ALIASES # ########### def METHOD_NAME(refresh=False, *args, **kwargs): """ The mockingjay large model on 360hr, with Lel as input and Linear as target refresh (bool): whether to download ckpt/config again if existed """ return mockingjay_logMelLinearLarge_T_AdamW_b32_500k_360hr_drop1( refresh=refresh, *args, **kwargs ) def mockingjay_100hr(refresh=False, *args, **kwargs): """ The mockingjay base model on 100hr refresh (bool): whether to download ckpt/config again if existed """ return mockingjay_logMelBase_T_AdamW_b32_200k_100hr( refresh=refresh, *args, **kwargs ) def mockingjay_960hr(refresh=False, *args, **kwargs): """ The mockingjay base model on 960hr refresh (bool): whether to download ckpt/config again if 
existed """ return mockingjay_logMelBase_T_AdamW_b32_1m_960hr_drop1( refresh=refresh, *args, **kwargs ) ########## # 100 HR # ########## def mockingjay_logMelBase_T_AdamW_b32_200k_100hr(refresh=False, *args, **kwargs): """ Feature: 80-dim log Mel Alteration: time Optimizer: AdamW Batch size: 32 Total steps: 200k Unlabled Speech: 100hr """ kwargs["ckpt"] = "https://www.dropbox.com/s/luorglf8mdg67l2/states-200000.ckpt?dl=1" return mockingjay_url(refresh=refresh, *args, **kwargs) ########## # 360 HR # ########## def mockingjay_logMelLinearLarge_T_AdamW_b32_500k_360hr_drop1( refresh=False, *args, **kwargs ): """ Feature: 80-dim log Mel (input) / 201-dim Linear (target) Alteration: time Optimizer: AdamW Batch size: 32 Total steps: 500k Unlabled Speech: 360hr """ kwargs["ckpt"] = "https://www.dropbox.com/s/zwsfa6w2iy2cc68/states-500000.ckpt?dl=1" return mockingjay_url(refresh=refresh, *args, **kwargs) ########## # 960 HR # ########## def mockingjay_logMelBase_T_AdamW_b32_1m_960hr(refresh=False, *args, **kwargs): """ Feature: 80-dim log Mel Alteration: time Optimizer: AdamW Batch size: 32 Total steps: 1M Unlabled Speech: 960hr """ kwargs[ "ckpt" ] = "https://www.dropbox.com/s/jzx0xggk663jev6/states-1000000.ckpt?dl=1" return mockingjay_url(refresh=refresh, *args, **kwargs) def mockingjay_logMelBase_T_AdamW_b32_1m_960hr_drop1(refresh=False, *args, **kwargs): """ Feature: 80-dim log Mel Alteration: time Optimizer: AdamW Batch size: 32 Total steps: 1M Unlabled Speech: 960hr Differences: Dropout of 0.1 (instead of 0.3) """ kwargs[ "ckpt" ] = "https://www.dropbox.com/s/7f9z6dzc7oix6qv/states-1000000.ckpt?dl=1" return mockingjay_url(refresh=refresh, *args, **kwargs) def mockingjay_logMelBase_T_AdamW_b32_1m_960hr_seq3k(refresh=False, *args, **kwargs): """ Feature: 80-dim log Mel Alteration: time Optimizer: AdamW Batch size: 32 Total steps: 1M Unlabled Speech: 960hr Differences: sequence length of 3k (instead of 1.5k) """ kwargs[ "ckpt" ] = 
"https://www.dropbox.com/s/qnnvdrai2tfmjmh/states-1000000.ckpt?dl=1" return mockingjay_url(refresh=refresh, *args, **kwargs)
5,662
get generator object code
# Copyright 2023, Kay Hayen, mailto:kay.hayen@gmail.com # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Code to generate and interact with compiled function objects. """ from nuitka.PythonVersions import python_version from .CodeHelpers import generateStatementSequenceCode from .Emission import SourceCodeCollector from .FunctionCodes import ( finalizeFunctionLocalVariables, getClosureCopyCode, getFunctionCreationArgs, getFunctionQualnameObj, setupFunctionLocalVariables, ) from .Indentation import indented from .ModuleCodes import getModuleAccessCode from .templates.CodeTemplatesGeneratorFunction import ( template_generator_exception_exit, template_generator_noexception_exit, template_generator_return_exit, template_genfunc_yielder_body_template, template_genfunc_yielder_maker_decl, template_make_empty_generator, template_make_generator, ) from .YieldCodes import getYieldReturnDispatchCode def _getGeneratorMakerIdentifier(function_identifier): return "MAKE_GENERATOR_" + function_identifier def getGeneratorObjectDeclCode(function_identifier, closure_variables): generator_creation_args = getFunctionCreationArgs( defaults_name=None, kw_defaults_name=None, annotations_name=None, closure_variables=closure_variables, tstate=False, ) return template_genfunc_yielder_maker_decl % { "generator_maker_identifier": 
_getGeneratorMakerIdentifier(function_identifier), "generator_creation_args": ", ".join(generator_creation_args), } def METHOD_NAME( context, function_identifier, closure_variables, user_variables, outline_variables, temp_variables, needs_exception_exit, needs_generator_return, ): # A bit of details going on here, pylint: disable=too-many-locals setupFunctionLocalVariables( context=context, parameters=None, closure_variables=closure_variables, user_variables=user_variables + outline_variables, temp_variables=temp_variables, ) function_codes = SourceCodeCollector() generateStatementSequenceCode( statement_sequence=context.getOwner().subnode_body, allow_none=True, emit=function_codes, context=context, ) function_cleanup = finalizeFunctionLocalVariables(context) if needs_exception_exit: ( exception_type, exception_value, exception_tb, _exception_lineno, ) = context.variable_storage.getExceptionVariableDescriptions() generator_exit = template_generator_exception_exit % { "function_cleanup": indented(function_cleanup), "exception_type": exception_type, "exception_value": exception_value, "exception_tb": exception_tb, } else: generator_exit = template_generator_noexception_exit % { "function_cleanup": indented(function_cleanup) } if needs_generator_return: generator_exit += template_generator_return_exit % { "return_value": context.getReturnValueName() if python_version >= 0x300 else None } function_locals = context.variable_storage.makeCFunctionLevelDeclarations() local_type_decl = context.variable_storage.makeCStructLevelDeclarations() function_locals += context.variable_storage.makeCStructInits() generator_object_body = context.getOwner() if local_type_decl: heap_declaration = """\ struct %(function_identifier)s_locals *generator_heap = \ (struct %(function_identifier)s_locals *)generator->m_heap_storage;""" % { "function_identifier": function_identifier } else: heap_declaration = "" generator_creation_args = getFunctionCreationArgs( defaults_name=None, 
kw_defaults_name=None, annotations_name=None, closure_variables=closure_variables, tstate=False, ) return template_genfunc_yielder_body_template % { "function_identifier": function_identifier, "function_body": indented(function_codes.codes), "heap_declaration": indented(heap_declaration), "has_heap_declaration": 1 if heap_declaration != "" else 0, "function_local_types": indented(local_type_decl), "function_var_inits": indented(function_locals), "function_dispatch": indented(getYieldReturnDispatchCode(context)), "generator_maker_identifier": _getGeneratorMakerIdentifier(function_identifier), "generator_creation_args": ", ".join(generator_creation_args), "generator_exit": generator_exit, "generator_module": getModuleAccessCode(context), "generator_name_obj": context.getConstantCode( constant=generator_object_body.getFunctionName() ), "generator_qualname_obj": getFunctionQualnameObj( generator_object_body, context ), "code_identifier": context.getCodeObjectHandle( code_object=generator_object_body.getCodeObject() ), "closure_name": "closure" if closure_variables else "NULL", "closure_count": len(closure_variables), } def generateMakeGeneratorObjectCode(to_name, expression, emit, context): generator_object_body = expression.subnode_generator_ref.getFunctionBody() closure_variables = expression.getClosureVariableVersions() closure_name, closure_copy = getClosureCopyCode( closure_variables=closure_variables, context=context ) args = [] if closure_name: args.append(closure_name) # Special case empty generators. 
if generator_object_body.subnode_body is None: emit( template_make_empty_generator % { "closure_copy": indented(closure_copy, 0, True), "to_name": to_name, "generator_module": getModuleAccessCode(context), "generator_name_obj": context.getConstantCode( constant=generator_object_body.getFunctionName() ), "generator_qualname_obj": getFunctionQualnameObj( generator_object_body, context ), "code_identifier": context.getCodeObjectHandle( code_object=generator_object_body.getCodeObject() ), "closure_name": closure_name if closure_name is not None else "NULL", "closure_count": len(closure_variables), } ) else: emit( template_make_generator % { "generator_maker_identifier": _getGeneratorMakerIdentifier( generator_object_body.getCodeName() ), "to_name": to_name, "args": ", ".join(str(arg) for arg in args), "closure_copy": indented(closure_copy, 0, True), } ) context.addCleanupTempName(to_name)
5,663
test columns queries
from __future__ import annotations from datetime import timedelta from typing import TYPE_CHECKING from urllib.parse import quote import pytest from api import utils from tests.helper import create_invoice, create_product, create_token, create_user if TYPE_CHECKING: from httpx import AsyncClient as TestClient pytestmark = pytest.mark.anyio async def test_multiple_query(client: TestClient, token: str): user1 = await create_user(client) user2 = await create_user(client) query = f"{user1['email']}|{user2['email']}" resp = await client.get(f"/users?multiple=true&query={query}", headers={"Authorization": f"Bearer {token}"}) assert resp.status_code == 200 assert resp.json()["count"] == 2 async def test_next_prev_url(client: TestClient, token: str): # create multiple users await create_user(client) await create_user(client) resp = await client.get("/users?limit=1", headers={"Authorization": f"Bearer {token}"}) next_url = resp.json()["next"] assert next_url.endswith("/users?limit=1&offset=1") # previous resp = await client.get("/users?limit=1&offset=1", headers={"Authorization": f"Bearer {token}"}) prev_url = resp.json()["previous"] assert prev_url.endswith("/users?limit=1") # next resp = await client.get("/users?limit=1&offset=2", headers={"Authorization": f"Bearer {token}"}) prev_url = resp.json()["previous"] assert prev_url.endswith("/users?limit=1&offset=1") async def test_undefined_sort(client: TestClient, token: str): resp = await client.get("/users?sort=fake", headers={"Authorization": f"Bearer {token}"}) assert resp.json()["result"] == [] async def test_products_pagination(client: TestClient, user, token: str): product = await create_product(client, user["id"], token) resp = await client.get( f"/products?store={product['store_id']}&category={product['category']}& " f" min_price=0.001&max_price={product['price']}", headers={"Authorization": f"Bearer {token}"}, ) assert resp.json()["count"] > 0 async def test_token_pagination(client: TestClient, user): token_data = 
await create_token(client, user, app_id="998") permissions = ",".join(token_data["permissions"]) resp = await client.get( f"/token?app_id={token_data['app_id']}&redirect_url={token_data['redirect_url']}&permissions={permissions}", headers={"Authorization": f"Bearer {token_data['id']}"}, ) assert resp.json()["count"] == 1 async def check_query(client: TestClient, token: str, column, value, expected_count, allow_nonexisting=False): query = quote(f"{column}:{value}") resp = await client.get(f"/invoices?query={query}", headers={"Authorization": f"Bearer {token}"}) assert resp.status_code == 200 assert resp.json()["count"] == expected_count if not allow_nonexisting: for item in resp.json()["result"]: assert item[column] == value async def METHOD_NAME(client: TestClient, user, token): await create_invoice(client, user["id"], token, currency="USD") await create_invoice(client, user["id"], token, currency="EUR") await check_query(client, token, "currency", "USD", 1) await check_query(client, token, "currency", "EUR", 1) async def test_undefined_column_query(client: TestClient, user, token): await create_invoice(client, user["id"], token, currency="test") await check_query(client, token, "test", "test", 1, allow_nonexisting=True) # skips undefined columns async def test_bad_type_column_query(client: TestClient, user, token): await create_invoice(client, user["id"], token, price=10) await check_query(client, token, "price", "test", 0) async def check_start_date_query(client, token, date, expected_count, first_id, start=True): query = quote(f"start_date:{date}") if start else quote(f"end_date:{date}") ind = 0 if start else -1 resp = await client.get(f"/invoices?query={query}&sort=created&desc=false", headers={"Authorization": f"Bearer {token}"}) assert resp.status_code == 200 assert resp.json()["count"] == expected_count assert resp.json()["result"][ind]["id"] == first_id async def test_date_pagination(client: TestClient, user, token): now = utils.time.now() invoice1 = await 
create_invoice(client, user["id"], token, created=(now - timedelta(hours=1)).isoformat()) invoice2 = await create_invoice(client, user["id"], token, created=(now - timedelta(days=1)).isoformat()) invoice3 = await create_invoice(client, user["id"], token, created=(now - timedelta(weeks=1)).isoformat()) await check_start_date_query(client, token, "-2h", 1, invoice1["id"]) await check_start_date_query(client, token, "-2d", 2, invoice2["id"]) await check_start_date_query(client, token, "-2w", 3, invoice3["id"]) await check_start_date_query(client, token, "-1w", 1, invoice3["id"], start=False) await check_start_date_query(client, token, "-1d", 2, invoice2["id"], start=False) await check_start_date_query(client, token, "-1h", 3, invoice1["id"], start=False)
5,664
upload
""" Generate CSV files for submission and assessment data, then upload to S3. """ import datetime import os import os.path import shutil import sys import tarfile import tempfile from django.core.management.base import BaseCommand, CommandError from openassessment.data import CsvWriter from openassessment.fileupload.backends.s3 import _connect_to_s3 class Command(BaseCommand): """ Create and upload CSV files for submission and assessment data. """ help = 'Create and upload CSV files for submission and assessment data.' args = '<COURSE_ID> <S3_BUCKET_NAME>' OUTPUT_CSV_PATHS = { output_name: f"{output_name}.csv" for output_name in CsvWriter.MODELS } URL_EXPIRATION_HOURS = 24 PROGRESS_INTERVAL = 10 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._history = [] self._submission_counter = 0 @property def history(self): """ Return the upload history, which is useful for testing. Returns: list of dictionaries with keys 'url' and 'key' """ return self._history def handle(self, *args, **options): """ Execute the command. Args: course_id (unicode): The ID of the course to use. s3_bucket_name (unicode): The name of the S3 bucket to upload to. 
Raises: CommandError """ if len(args) < 2: raise CommandError(f'Usage: upload_oa_data {self.args}') course_id, s3_bucket = args[0], args[1] if isinstance(course_id, bytes): course_id = course_id.decode('utf-8') if isinstance(s3_bucket, bytes): s3_bucket = s3_bucket.decode('utf-8') csv_dir = tempfile.mkdtemp() try: print(f"Generating CSV files for course '{course_id}'") self._dump_to_csv(course_id, csv_dir) print(f"Creating archive of CSV files in {csv_dir}") archive_path = self._create_archive(csv_dir) print(f"Uploading {archive_path} to {s3_bucket}/{course_id}") url = self.METHOD_NAME(course_id, archive_path, s3_bucket) print("== Upload successful ==") print(f"Download URL (expires in {self.URL_EXPIRATION_HOURS} hours):\n{url}") finally: # Assume that the archive was created in the directory, # so to clean up we just need to delete the directory. shutil.rmtree(csv_dir) def _dump_to_csv(self, course_id, csv_dir): """ Create CSV files for submission/assessment data in a directory. Args: course_id (unicode): The ID of the course to dump data from. csv_dir (unicode): The absolute path to the directory in which to create CSV files. Returns: None """ output_streams = { name: open(os.path.join(csv_dir, rel_path), 'w') # pylint: disable=consider-using-with for name, rel_path in self.OUTPUT_CSV_PATHS.items() } csv_writer = CsvWriter(output_streams, self._progress_callback) csv_writer.write_to_csv(course_id) def _create_archive(self, dir_path): """ Create an archive of a directory. Args: dir_path (unicode): The absolute path to the directory containing the CSV files. Returns: unicode: Absolute path to the archive. 
""" tarball_name = "{}.tar.gz".format( datetime.datetime.utcnow().strftime("%Y-%m-%dT%H_%M") ) tarball_path = os.path.join(dir_path, tarball_name) with tarfile.open(tarball_path, "w:gz") as tar: for rel_path in self.OUTPUT_CSV_PATHS.values(): tar.add(os.path.join(dir_path, rel_path), arcname=rel_path) return tarball_path def METHOD_NAME(self, course_id, file_path, s3_bucket): """ Upload a file. Args: course_id (unicode): The ID of the course. file_path (unicode): Absolute path to the file to upload. s3_bucket (unicode): Name of the S3 bucket where the file will be uploaded. Returns: str: URL to access the uploaded archive. """ conn = _connect_to_s3() key_name = os.path.join(course_id, os.path.split(file_path)[1]) with open(file_path, "rb") as f: conn.put_object( Bucket=s3_bucket, Key=key_name, Body=f.read() ) url = conn.generate_presigned_url( "get_object", Params={ "Bucket": s3_bucket, "Key": key_name, }, ExpiresIn=self.URL_EXPIRATION_HOURS * 3600 ) # Store the key and url in the history self._history.append({'key': key_name, 'url': url}) return url def _progress_callback(self): """ Indicate progress to the user as submissions are processed. """ self._submission_counter += 1 if self._submission_counter > 0 and self._submission_counter % self.PROGRESS_INTERVAL == 0: sys.stdout.write('.') sys.stdout.flush()
5,665
callback manager
from __future__ import annotations

from typing import Any, Optional, Tuple, cast

from llama_index.bridge.pydantic import PrivateAttr
from llama_index.prompts import BasePromptTemplate
from llama_index.callbacks import CallbackManager
from llama_index.callbacks.schema import CBEventType, EventPayload
from llama_index.llm_predictor.base import LLM, BaseLLMPredictor, LLMMetadata
from llama_index.llm_predictor.vellum.exceptions import VellumGenerateException
from llama_index.llm_predictor.vellum.prompt_registry import VellumPromptRegistry
from llama_index.llm_predictor.vellum.types import (
    VellumCompiledPrompt,
    VellumRegisteredPrompt,
)
from llama_index.types import TokenAsyncGen, TokenGen


class VellumPredictor(BaseLLMPredictor):
    """LLM predictor backed by Vellum-deployed prompts.

    Prompts are registered/compiled through a VellumPromptRegistry and
    executed against Vellum deployments; callback events wrap each call.
    """

    # Private (non-pydantic-validated) attributes.
    _callback_manager: CallbackManager = PrivateAttr(default_factory=CallbackManager)
    _vellum_client: Any = PrivateAttr()
    _async_vellum_client = PrivateAttr()
    _prompt_registry: Any = PrivateAttr()

    class Config:
        arbitrary_types_allowed = True

    def __init__(
        self,
        vellum_api_key: str,
        METHOD_NAME: Optional[CallbackManager] = None,
    ) -> None:
        # Import lazily so llama_index works without the optional dependency.
        import_err_msg = (
            "`vellum` package not found, please run `pip install vellum-ai`"
        )
        try:
            from vellum.client import AsyncVellum, Vellum  # noqa: F401
        except ImportError:
            raise ImportError(import_err_msg)

        self._callback_manager = METHOD_NAME or CallbackManager([])

        # Vellum-specific
        self._vellum_client = Vellum(api_key=vellum_api_key)
        self._async_vellum_client = AsyncVellum(api_key=vellum_api_key)
        self._prompt_registry = VellumPromptRegistry(vellum_api_key=vellum_api_key)

        super().__init__()

    @classmethod
    def class_name(cls) -> str:
        """Get class name."""
        return "VellumPredictor"

    @property
    def metadata(self) -> LLMMetadata:
        """Get LLM metadata."""
        # Note: We use default values here, but ideally we would retrieve this metadata
        # via Vellum's API based on the LLM that backs the registered prompt's
        # deployment. This is not currently possible, so we use default values.
        return LLMMetadata()

    @property
    def METHOD_NAME(self) -> CallbackManager:
        """Get callback manager."""
        return self._callback_manager

    @property
    def llm(self) -> LLM:
        """Get the LLM."""
        raise NotImplementedError("Vellum does not expose the LLM.")

    def predict(self, prompt: BasePromptTemplate, **prompt_args: Any) -> str:
        """Predict the answer to a query."""
        from vellum import GenerateRequest

        registered_prompt, compiled_prompt, event_id = self._prepare_generate_call(
            prompt, **prompt_args
        )

        # Explicit kwargs override the template's bound kwargs on key clash.
        input_values = {
            **prompt.kwargs,
            **prompt_args,
        }
        result = self._vellum_client.generate(
            deployment_id=registered_prompt.deployment_id,
            requests=[GenerateRequest(input_values=input_values)],
        )

        completion_text = self._process_generate_response(
            result, compiled_prompt, event_id
        )

        return completion_text

    def stream(self, prompt: BasePromptTemplate, **prompt_args: Any) -> TokenGen:
        """Stream the answer to a query."""
        from vellum import GenerateRequest, GenerateStreamResult

        registered_prompt, compiled_prompt, event_id = self._prepare_generate_call(
            prompt, **prompt_args
        )

        input_values = {
            **prompt.kwargs,
            **prompt_args,
        }
        responses = self._vellum_client.generate_stream(
            deployment_id=registered_prompt.deployment_id,
            requests=[GenerateRequest(input_values=input_values)],
        )

        def text_generator() -> TokenGen:
            # Accumulate the full completion so the end-of-stream callback
            # event can report the complete response text.
            complete_text = ""

            while True:
                try:
                    stream_response = next(responses)
                except StopIteration:
                    # Stream exhausted: close the LLM callback event opened
                    # in _prepare_generate_call, then stop yielding.
                    self.METHOD_NAME.on_event_end(
                        CBEventType.LLM,
                        payload={
                            EventPayload.RESPONSE: complete_text,
                            EventPayload.PROMPT: compiled_prompt.text,
                        },
                        event_id=event_id,
                    )
                    break

                result: GenerateStreamResult = stream_response.delta

                if result.error:
                    raise VellumGenerateException(result.error.message)
                elif not result.data:
                    raise VellumGenerateException(
                        "Unknown error occurred while generating"
                    )

                completion_text_delta = result.data.completion.text
                complete_text += completion_text_delta

                yield completion_text_delta

        return text_generator()

    async def apredict(self, prompt: BasePromptTemplate, **prompt_args: Any) -> str:
        """Asynchronously predict the answer to a query."""
        from vellum import GenerateRequest

        registered_prompt, compiled_prompt, event_id = self._prepare_generate_call(
            prompt, **prompt_args
        )

        input_values = {
            **prompt.kwargs,
            **prompt_args,
        }
        result = await self._async_vellum_client.generate(
            deployment_id=registered_prompt.deployment_id,
            requests=[GenerateRequest(input_values=input_values)],
        )

        completion_text = self._process_generate_response(
            result, compiled_prompt, event_id
        )

        return completion_text

    async def astream(
        self, prompt: BasePromptTemplate, **prompt_args: Any
    ) -> TokenAsyncGen:
        async def gen() -> TokenAsyncGen:
            for token in self.stream(prompt, **prompt_args):
                yield token

        # NOTE: convert generator to async generator
        return gen()

    def _prepare_generate_call(
        self, prompt: BasePromptTemplate, **prompt_args: Any
    ) -> Tuple[VellumRegisteredPrompt, VellumCompiledPrompt, str]:
        """Prepare a generate call.

        Registers/compiles the prompt and opens an LLM callback event;
        the matching on_event_end is fired by _process_generate_response
        (sync/async predict) or by the stream generator on exhaustion.
        """
        registered_prompt = self._prompt_registry.from_prompt(prompt)
        compiled_prompt = self._prompt_registry.get_compiled_prompt(
            registered_prompt, prompt_args
        )

        cb_payload = {
            **prompt_args,
            "deployment_id": registered_prompt.deployment_id,
            "model_version_id": registered_prompt.model_version_id,
        }
        event_id = self.METHOD_NAME.on_event_start(
            CBEventType.LLM,
            payload=cb_payload,
        )
        return registered_prompt, compiled_prompt, event_id

    def _process_generate_response(
        self,
        result: Any,
        compiled_prompt: VellumCompiledPrompt,
        event_id: str,
    ) -> str:
        """Process the response from a generate call.

        Extracts the completion text and closes the callback event that
        _prepare_generate_call opened.
        """
        from vellum import GenerateResponse

        result = cast(GenerateResponse, result)

        completion_text = result.text

        self.METHOD_NAME.on_event_end(
            CBEventType.LLM,
            payload={
                EventPayload.RESPONSE: completion_text,
                EventPayload.PROMPT: compiled_prompt.text,
            },
            event_id=event_id,
        )

        return completion_text
5,666
test directory radius server bad auth protocol
from unittest import mock

from prowler.providers.aws.services.directoryservice.directoryservice_service import (
    AuthenticationProtocol,
    Directory,
    DirectoryType,
    RadiusSettings,
    RadiusStatus,
)

AWS_REGION = "eu-west-1"
AWS_ACCOUNT_NUMBER = "123456789012"


class Test_directoryservice_radius_server_security_protocol:
    """Unit tests for the directoryservice RADIUS security-protocol check.

    Each test patches the DirectoryService client with a stub whose
    ``directories`` dict drives the check, importing the check *inside*
    the patch context so it binds to the mocked service.
    """

    def test_no_directories(self):
        """No directories configured -> the check produces no findings."""
        # NOTE(review): mock.MagicMock (the class, not an instance) is
        # assigned, so attributes are set on the class object itself;
        # presumably intentional across this suite — confirm.
        directoryservice_client = mock.MagicMock
        directoryservice_client.directories = {}
        with mock.patch(
            "prowler.providers.aws.services.directoryservice.directoryservice_service.DirectoryService",
            new=directoryservice_client,
        ):
            # Test Check
            from prowler.providers.aws.services.directoryservice.directoryservice_radius_server_security_protocol.directoryservice_radius_server_security_protocol import (
                directoryservice_radius_server_security_protocol,
            )

            check = directoryservice_radius_server_security_protocol()
            result = check.execute()

            assert len(result) == 0

    def test_directory_no_radius_server(self):
        """Directory without RADIUS settings -> check emits no finding."""
        directoryservice_client = mock.MagicMock
        directory_name = "test-directory"
        directory_id = "d-12345a1b2"
        directory_arn = (
            f"arn:aws:ds:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:directory/d-12345a1b2"
        )
        directoryservice_client.directories = {
            directory_name: Directory(
                name=directory_name,
                id=directory_id,
                arn=directory_arn,
                type=DirectoryType.MicrosoftAD,
                region=AWS_REGION,
                radius_settings=None,
            )
        }
        with mock.patch(
            "prowler.providers.aws.services.directoryservice.directoryservice_service.DirectoryService",
            new=directoryservice_client,
        ):
            # Test Check
            from prowler.providers.aws.services.directoryservice.directoryservice_radius_server_security_protocol.directoryservice_radius_server_security_protocol import (
                directoryservice_radius_server_security_protocol,
            )

            check = directoryservice_radius_server_security_protocol()
            result = check.execute()

            assert len(result) == 0

    def METHOD_NAME(self):
        """RADIUS with MS_CHAPv1 (weak protocol) -> one FAIL finding."""
        directoryservice_client = mock.MagicMock
        directory_name = "test-directory"
        directory_id = "d-12345a1b2"
        directory_arn = (
            f"arn:aws:ds:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:directory/d-12345a1b2"
        )
        directoryservice_client.directories = {
            directory_name: Directory(
                name=directory_name,
                id=directory_id,
                arn=directory_arn,
                type=DirectoryType.MicrosoftAD,
                region=AWS_REGION,
                radius_settings=RadiusSettings(
                    authentication_protocol=AuthenticationProtocol.MS_CHAPv1,
                    status=RadiusStatus.Completed,
                ),
            )
        }
        with mock.patch(
            "prowler.providers.aws.services.directoryservice.directoryservice_service.DirectoryService",
            new=directoryservice_client,
        ):
            # Test Check
            from prowler.providers.aws.services.directoryservice.directoryservice_radius_server_security_protocol.directoryservice_radius_server_security_protocol import (
                directoryservice_radius_server_security_protocol,
            )

            check = directoryservice_radius_server_security_protocol()
            result = check.execute()

            assert len(result) == 1
            assert result[0].resource_id == directory_id
            assert result[0].resource_arn == directory_arn
            assert result[0].resource_tags == []
            assert result[0].region == AWS_REGION
            assert result[0].status == "FAIL"
            assert (
                result[0].status_extended
                == f"Radius server of Directory {directory_id} does not have recommended security protocol for the Radius server."
            )

    def test_directory_radius_server_secure_auth_protocol(self):
        """RADIUS with MS_CHAPv2 (recommended protocol) -> one PASS finding."""
        directoryservice_client = mock.MagicMock
        directory_name = "test-directory"
        directory_id = "d-12345a1b2"
        directory_arn = (
            f"arn:aws:ds:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:directory/d-12345a1b2"
        )
        directoryservice_client.directories = {
            directory_name: Directory(
                name=directory_name,
                id=directory_id,
                arn=directory_arn,
                type=DirectoryType.MicrosoftAD,
                region=AWS_REGION,
                radius_settings=RadiusSettings(
                    authentication_protocol=AuthenticationProtocol.MS_CHAPv2,
                    status=RadiusStatus.Completed,
                ),
            )
        }
        with mock.patch(
            "prowler.providers.aws.services.directoryservice.directoryservice_service.DirectoryService",
            new=directoryservice_client,
        ):
            # Test Check
            from prowler.providers.aws.services.directoryservice.directoryservice_radius_server_security_protocol.directoryservice_radius_server_security_protocol import (
                directoryservice_radius_server_security_protocol,
            )

            check = directoryservice_radius_server_security_protocol()
            result = check.execute()

            assert len(result) == 1
            assert result[0].resource_id == directory_id
            assert result[0].resource_arn == directory_arn
            assert result[0].resource_tags == []
            assert result[0].region == AWS_REGION
            assert result[0].status == "PASS"
            assert (
                result[0].status_extended
                == f"Radius server of Directory {directory_id} have recommended security protocol for the Radius server."
            )
5,667
submit request to change email
import logging

from aiohttp import web
from aiohttp.web import RouteTableDef
from models_library.emails import LowerCaseEmailStr
from pydantic import SecretStr, validator
from servicelib.aiohttp.requests_validation import parse_request_body_as
from servicelib.mimetype_constants import MIMETYPE_APPLICATION_JSON
from servicelib.request_keys import RQT_USERID_KEY

from .._meta import API_VTAG
from ..products.plugin import Product, get_current_product
from ..security.api import check_password, encrypt_password
from ..utils import HOUR
from ..utils_rate_limiting import global_rate_limit_route
from ._confirmation import is_confirmation_allowed, make_confirmation_link
from ._constants import (
    MSG_CANT_SEND_MAIL,
    MSG_CHANGE_EMAIL_REQUESTED,
    MSG_EMAIL_SENT,
    MSG_OFTEN_RESET_PASSWORD,
    MSG_PASSWORD_CHANGED,
    MSG_UNKNOWN_EMAIL,
    MSG_WRONG_PASSWORD,
)
from ._models import InputSchema, create_password_match_validator
from .decorators import login_required
from .settings import LoginOptions, get_plugin_options
from .storage import AsyncpgStorage, get_plugin_storage
from .utils import (
    ACTIVE,
    CHANGE_EMAIL,
    RESET_PASSWORD,
    flash_response,
    validate_user_status,
)
from .utils_email import get_template_path, send_email_from_template

_logger = logging.getLogger(__name__)


routes = RouteTableDef()


class ResetPasswordBody(InputSchema):
    # Intentionally a plain str: validation errors must not reveal
    # whether an account exists for the submitted address.
    email: str


@routes.post(f"/{API_VTAG}/auth/reset-password", name="auth_reset_password")
@global_rate_limit_route(number_of_requests=10, interval_seconds=HOUR)
async def submit_request_to_reset_password(request: web.Request):
    """
    1. confirm user exists
    2. check user status
    3. send email with link to reset password
    4. user clicks confirmation link -> auth/confirmation/{} -> reset_password_allowed

    Follows guidelines from [1]: https://postmarkapp.com/guides/password-reset-email-best-practices
     - You would never want to confirm or deny the existence of an account with a given email or username.
     - Expiration of link
     - Support contact information
     - Who requested the reset?
    """
    db: AsyncpgStorage = get_plugin_storage(request.app)
    cfg: LoginOptions = get_plugin_options(request.app)
    product: Product = get_current_product(request)

    request_body = await parse_request_body_as(ResetPasswordBody, request)

    user = await db.get_user({"email": request_body.email})
    try:
        if not user:
            raise web.HTTPUnprocessableEntity(
                reason=MSG_UNKNOWN_EMAIL, content_type=MIMETYPE_APPLICATION_JSON
            )  # 422

        validate_user_status(user=user, support_email=product.support_email)

        assert user["status"] == ACTIVE  # nosec
        assert user["email"] == request_body.email  # nosec

        if not await is_confirmation_allowed(cfg, db, user, action=RESET_PASSWORD):
            raise web.HTTPUnauthorized(
                reason=MSG_OFTEN_RESET_PASSWORD,
                content_type=MIMETYPE_APPLICATION_JSON,
            )  # 401

    except web.HTTPError as err:
        # Anti-enumeration: instead of returning the error to the caller,
        # email the *reason* to the submitted address and fall through to
        # the same success response as the happy path.
        try:
            await send_email_from_template(
                request,
                from_=product.support_email,
                to=request_body.email,
                template=await get_template_path(
                    request, "reset_password_email_failed.jinja2"
                ),
                context={
                    "host": request.host,
                    "reason": err.reason,
                },
            )
        except Exception as err_mail:  # pylint: disable=broad-except
            _logger.exception("Cannot send email")
            raise web.HTTPServiceUnavailable(reason=MSG_CANT_SEND_MAIL) from err_mail
    else:
        confirmation = await db.create_confirmation(user["id"], action=RESET_PASSWORD)
        link = make_confirmation_link(request, confirmation)
        try:
            # primary reset email with a URL and the normal instructions.
            await send_email_from_template(
                request,
                from_=product.support_email,
                to=request_body.email,
                template=await get_template_path(
                    request, "reset_password_email.jinja2"
                ),
                context={
                    "host": request.host,
                    "link": link,
                },
            )
        except Exception as err:  # pylint: disable=broad-except
            _logger.exception("Can not send email")
            # Roll back the confirmation so a later retry is not blocked.
            await db.delete_confirmation(confirmation)
            raise web.HTTPServiceUnavailable(reason=MSG_CANT_SEND_MAIL) from err

    return flash_response(MSG_EMAIL_SENT.format(email=request_body.email), "INFO")


class ChangeEmailBody(InputSchema):
    email: LowerCaseEmailStr


@routes.post(f"/{API_VTAG}/auth/change-email", name="auth_change_email")
@login_required
async def METHOD_NAME(request: web.Request):
    """Request an email change for the logged-in user.

    A confirmation link is mailed to the *new* address; the change only
    takes effect once that link is followed.
    """
    db: AsyncpgStorage = get_plugin_storage(request.app)
    product: Product = get_current_product(request)

    request_body = await parse_request_body_as(ChangeEmailBody, request)

    user = await db.get_user({"id": request[RQT_USERID_KEY]})
    assert user  # nosec

    if user["email"] == request_body.email:
        # No-op: new address equals the current one.
        return flash_response("Email changed")

    other = await db.get_user({"email": request_body.email})
    if other:
        # Address already taken by another account.
        raise web.HTTPUnprocessableEntity(reason="This email cannot be used")

    # Reset if previously requested
    confirmation = await db.get_confirmation({"user": user, "action": CHANGE_EMAIL})
    if confirmation:
        await db.delete_confirmation(confirmation)

    # create new confirmation to ensure email is actually valid
    confirmation = await db.create_confirmation(
        user["id"], CHANGE_EMAIL, request_body.email
    )
    link = make_confirmation_link(request, confirmation)
    try:
        await send_email_from_template(
            request,
            from_=product.support_email,
            to=request_body.email,
            template=await get_template_path(request, "change_email_email.jinja2"),
            context={
                "host": request.host,
                "link": link,
            },
        )
    except Exception as err:  # pylint: disable=broad-except
        # NOTE(review): sibling handlers use _logger.exception here, which
        # also records the traceback — consider aligning.
        _logger.error("Can not send email")
        await db.delete_confirmation(confirmation)
        raise web.HTTPServiceUnavailable(reason=MSG_CANT_SEND_MAIL) from err

    return flash_response(MSG_CHANGE_EMAIL_REQUESTED)


class ChangePasswordBody(InputSchema):
    current: SecretStr
    new: SecretStr
    confirm: SecretStr

    # Validates that `confirm` repeats `new` exactly.
    _password_confirm_match = validator("confirm", allow_reuse=True)(
        create_password_match_validator(reference_field="new")
    )


@routes.post(f"/{API_VTAG}/auth/change-password", name="auth_change_password")
@login_required
async def change_password(request: web.Request):
    """Change the logged-in user's password after verifying the current one."""
    db: AsyncpgStorage = get_plugin_storage(request.app)

    passwords = await parse_request_body_as(ChangePasswordBody, request)

    user = await db.get_user({"id": request[RQT_USERID_KEY]})
    assert user  # nosec

    if not check_password(passwords.current.get_secret_value(), user["password_hash"]):
        raise web.HTTPUnprocessableEntity(
            reason=MSG_WRONG_PASSWORD, content_type=MIMETYPE_APPLICATION_JSON
        )  # 422

    await db.update_user(
        user, {"password_hash": encrypt_password(passwords.new.get_secret_value())}
    )

    return flash_response(MSG_PASSWORD_CHANGED)
5,668
is null
import numbers

from pyrsistent import pmap
import attr

from jsonschema.compat import int_types, str_types
from jsonschema.exceptions import UndefinedTypeCheck


def is_array(checker, instance):
    """True for JSON ``array`` values (Python lists)."""
    return isinstance(instance, list)


def is_bool(checker, instance):
    """True for JSON ``boolean`` values."""
    return isinstance(instance, bool)


def is_integer(checker, instance):
    """True for JSON ``integer`` values; booleans are deliberately excluded."""
    # bool is a subclass of int, so rule it out explicitly first.
    return not isinstance(instance, bool) and isinstance(instance, int_types)


def METHOD_NAME(checker, instance):
    """True for the JSON ``null`` value."""
    return instance is None


def is_number(checker, instance):
    """True for JSON ``number`` values; booleans are deliberately excluded."""
    # Same bool-vs-int caveat as in is_integer.
    return not isinstance(instance, bool) and isinstance(instance, numbers.Number)


def is_object(checker, instance):
    """True for JSON ``object`` values (Python dicts)."""
    return isinstance(instance, dict)


def is_string(checker, instance):
    """True for JSON ``string`` values."""
    return isinstance(instance, str_types)


def is_any(checker, instance):
    """Draft-3 ``any`` type: every instance matches."""
    return True


@attr.s(frozen=True)
class TypeChecker(object):
    """
    A ``type`` property checker.

    A `TypeChecker` performs type checking for an `IValidator`. Type
    checks to perform are updated using `TypeChecker.redefine` or
    `TypeChecker.redefine_many` and removed via `TypeChecker.remove`.
    Each of these return a new `TypeChecker` object.

    Arguments:

        type_checkers (dict):

            The initial mapping of types to their checking functions.
    """

    # Persistent (immutable) map of type name -> checking callable.
    _type_checkers = attr.ib(default=pmap(), converter=pmap)

    def is_type(self, instance, type):
        """
        Check if the instance is of the appropriate type.

        Arguments:

            instance (object):

                The instance to check

            type (str):

                The name of the type that is expected.

        Returns:

            bool: Whether it conformed.

        Raises:

            `jsonschema.exceptions.UndefinedTypeCheck`:
                if type is unknown to this object.
        """
        try:
            checker_fn = self._type_checkers[type]
        except KeyError:
            raise UndefinedTypeCheck(type)

        return checker_fn(self, instance)

    def redefine(self, type, fn):
        """
        Produce a new checker with the given type redefined.

        Arguments:

            type (str):

                The name of the type to check.

            fn (collections.Callable):

                A function taking exactly two parameters - the type
                checker calling the function and the instance to check.
                The function should return true if instance is of this
                type and false otherwise.

        Returns:

            A new `TypeChecker` instance.
        """
        # Single-type redefinition is just the plural form with one entry.
        return self.redefine_many({type: fn})

    def redefine_many(self, definitions=()):
        """
        Produce a new checker with the given types redefined.

        Arguments:

            definitions (dict):

                A dictionary mapping types to their checking functions.

        Returns:

            A new `TypeChecker` instance.
        """
        updated = self._type_checkers.update(definitions)
        return attr.evolve(self, type_checkers=updated)

    def remove(self, *types):
        """
        Produce a new checker with the given types forgotten.

        Arguments:

            types (~collections.Iterable):

                the names of the types to remove.

        Returns:

            A new `TypeChecker` instance

        Raises:

            `jsonschema.exceptions.UndefinedTypeCheck`:
                if any given type is unknown to this object
        """
        remaining = self._type_checkers
        for name in types:
            try:
                remaining = remaining.remove(name)
            except KeyError:
                raise UndefinedTypeCheck(name)
        return attr.evolve(self, type_checkers=remaining)


draft3_type_checker = TypeChecker(
    {
        u"any": is_any,
        u"array": is_array,
        u"boolean": is_bool,
        u"integer": is_integer,
        u"object": is_object,
        u"null": METHOD_NAME,
        u"number": is_number,
        u"string": is_string,
    },
)

# Draft 4 dropped the catch-all "any" type.
draft4_type_checker = draft3_type_checker.remove(u"any")


def _is_draft6_integer(checker, instance):
    # Draft 6+ additionally treats floats with a zero fractional part
    # (e.g. 1.0) as integers.
    return is_integer(checker, instance) or (
        isinstance(instance, float) and instance.is_integer()
    )


draft6_type_checker = draft4_type_checker.redefine(u"integer", _is_draft6_integer)
draft7_type_checker = draft6_type_checker
5,669
parameterized test elman
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Recurrent ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections

from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.contrib.recurrent.python.ops import recurrent
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test as test_lib
from tensorflow.python.platform import tf_logging as logging

# NOTE: ('h') is the *string* 'h', not a 1-tuple; namedtuple accepts a field
# string, so each of these declares exactly the named field(s).
_ElmanState = collections.namedtuple('ElmanState', ('h'))
_ElmanTheta = collections.namedtuple('ElmanTheta', ('w', 'b'))
_ElmanInputs = collections.namedtuple('ElmanInputs', ('x'))


# TODO(drpng): add test for max length computation.
class RecurrentTest(test_util.TensorFlowTestCase):
  # Exercises recurrent.Recurrent() both with a toy polynomial cell and with
  # an Elman RNN cell, comparing against statically unrolled references.

  def testBasic(self):
    # Accumulates a polynomial 1 + c1*x + c2*x^2 + ... one coefficient per
    # recurrent step, carrying (running value, current power of x) as state.
    # pylint:disable=invalid-name
    _PolyState = collections.namedtuple('PolyState', ('value', 'x_power'))
    _PolyTheta = collections.namedtuple('PolyTheta', ('x'))
    _PolyInputs = collections.namedtuple('PolyInputs', ('coeff'))
    # pylint:enable=invalid-name

    def Poly(theta, state, inputs):
      # One step: value += coeff * x^k; x_power *= x. Returns no extras.
      next_state = _PolyState(
          value=state.value + inputs.coeff * state.x_power,
          x_power=state.x_power * theta.x)
      return next_state, []

    with self.cached_session() as sess:
      theta = _PolyTheta(x=array_ops.constant(2.0))
      state = _PolyState(
          value=array_ops.constant(0.0), x_power=array_ops.constant(1.0))
      inputs = _PolyInputs(coeff=array_ops.constant([1., 2., 3.]))

      # x = 2
      # 1 + 2*x + 3*x^2
      ret = recurrent.Recurrent(theta, state, inputs, Poly)

      acc, state = sess.run(ret)
      self.assertAllClose(acc.value, [1., 5., 17.])
      self.assertAllClose(acc.x_power, [2., 4., 8.])
      self.assertAllClose(state.value, 17.)
      self.assertAllClose(state.x_power, 8.)

      # Gradient of the final value w.r.t. x and the coefficients.
      y = ret[1].value
      dx, d_coeff = gradients_impl.gradients(ys=[y],
                                             xs=[theta.x, inputs.coeff])
      dx_val, d_coeff_val = sess.run([dx, d_coeff])

      # 2 + 6*x
      self.assertAllClose(dx_val, 14.)
      self.assertAllClose(d_coeff_val, [1., 2., 4.])

      # acc = [1, 1+2x, 1+2x+3x^2]
      # sum(acc) = 3 + 4x + 3x^2
      acc = ret[0].value
      dx, d_coeff = gradients_impl.gradients(
          ys=[math_ops.reduce_sum(acc)], xs=[theta.x, inputs.coeff])
      dx_val, d_coeff_val = sess.run([dx, d_coeff])
      # 4 + 6*x
      self.assertAllClose(dx_val, 16.)
      self.assertAllClose(d_coeff_val, [3., 4., 4.])

  @staticmethod
  def Rand(shape):
    # Small uniform noise in float64 for numerically stable grad comparisons.
    return random_ops.random_uniform(
        shape, minval=-0.2, maxval=0.2, dtype=dtypes.float64)

  @staticmethod
  def Elman(theta, state0, inputs):
    # One Elman step: h1 = sigmoid([x, h0] @ w + b). The new state is also
    # returned as the per-step "extras" (second element).
    h0, w, b, x = state0.h, theta.w, theta.b, inputs.x
    xw = math_ops.matmul(array_ops.concat([x, h0], axis=1), w)
    h1 = math_ops.sigmoid(xw + b)
    state1 = _ElmanState(h=h1)
    return (state1, state1)

  @staticmethod
  def ElmanGrad(theta, state0, inputs, extras, dstate1):
    # Custom cell gradient matching Elman() above.

    @function.Defun()
    def Grad(h0, w, b, x, h1, dh1):
      del b

      # We hand-roll the gradient for the 2nd half of the cell as a demo.
      # d(sigmoid)/d(xw) = h1 * (1 - h1).
      dxwb = (dh1 * (1 - h1) * h1)
      dxw, db = dxwb, math_ops.reduce_sum(dxwb, axis=0)

      # Uses tf.gradient for the 1st half of the cell as a demo.
      xw = math_ops.matmul(array_ops.concat([x, h0], axis=1), w)
      dh0, dx, dw = gradients_impl.gradients(
          ys=[xw], xs=[h0, x, w], grad_ys=[dxw])

      return dh0, dx, dw, db

    dh0, dx, dw, db = Grad(state0.h, theta.w, theta.b, inputs.x, extras.h,
                           dstate1.h)
    dstate0 = _ElmanState(h=dh0)
    dinputs = _ElmanInputs(x=dx)
    return (_ElmanTheta(w=dw, b=db), dstate0, dinputs)

  @staticmethod
  def ElmanOut(state1):
    # NOTE(review): _ElmanState is declared with field 'h' only, so the
    # keyword 'x' here would raise TypeError if called — this helper appears
    # unused by the tests below; confirm before relying on it.
    return _ElmanState(x=state1.h)

  @staticmethod
  def ElmanOutGrad(dout):
    # NOTE(review): counterpart of ElmanOut; also appears unused (reads a
    # field 'x' that _ElmanState does not declare).
    return _ElmanState(h=dout.x)

  def testElman(self):
    # Sweep short/long sequences with and without the custom cell gradient.
    for seqlen, use_grad in [(1, False), (1, True), (7, False), (7, True)]:
      logging.info('== Elman: seqlen=%s, use_grad=%s', seqlen, use_grad)
      self.METHOD_NAME(seqlen, use_grad)

  def METHOD_NAME(self, seqlen, use_grad):
    # Compares recurrent.Recurrent against a statically unrolled Elman RNN:
    # forward activations, final state, and gradients w.r.t. all inputs.
    with self.cached_session() as sess:
      random_seed.set_random_seed(342462)

      batch = 3
      dims = 4
      theta = _ElmanTheta(w=RecurrentTest.Rand([2 * dims, dims]),
                          b=RecurrentTest.Rand([dims]))
      state0 = _ElmanState(h=RecurrentTest.Rand([batch, dims]))
      inputs = _ElmanInputs(x=RecurrentTest.Rand([seqlen, batch, dims]))

      # Statically unrolled.
      s = state0
      out = []
      for i in xrange(seqlen):
        inp = _ElmanInputs(x=inputs.x[i, :])
        s, _ = RecurrentTest.Elman(theta, s, inp)
        out += [s.h]
      acc0, final0 = array_ops.stack(out), s.h
      loss0 = math_ops.reduce_sum(acc0) + math_ops.reduce_sum(final0)
      (dw0, db0, dh0, di0) = gradients_impl.gradients(
          loss0, [theta.w, theta.b, state0.h, inputs.x])

      # Functional (recurrent.Recurrent) version of the same computation.
      acc1, final1 = recurrent.Recurrent(
          theta=theta,
          state0=state0,
          inputs=inputs,
          cell_fn=RecurrentTest.Elman,
          cell_grad=RecurrentTest.ElmanGrad if use_grad else None)
      assert isinstance(acc1, _ElmanState)
      assert isinstance(final1, _ElmanState)
      acc1, final1 = acc1.h, final1.h
      loss1 = math_ops.reduce_sum(acc1) + math_ops.reduce_sum(final1)
      (dw1, db1, dh1, di1) = gradients_impl.gradients(
          loss1, [theta.w, theta.b, state0.h, inputs.x])

      # Fetches a few values and compare them.
      (acc0, acc1, final0, final1, dw0, dw1, db0, db1,
       dh0, dh1, di0, di1) = sess.run(
           [acc0, acc1, final0, final1, dw0, dw1, db0, db1,
            dh0, dh1, di0, di1])
      self.assertAllClose(acc0, acc1)
      self.assertAllClose(final0, final1)
      self.assertAllClose(dw0, dw1)
      self.assertAllClose(db0, db1)
      self.assertAllClose(dh0, dh1)
      self.assertAllClose(di0, di1)


if __name__ == '__main__':
  test_lib.main()
5,670
test array cast
# ***************************************************************
# Copyright (c) 2023 Jittor. All Rights Reserved.
# Maintainers: Dun Liang <randonlang@gmail.com>.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
from .test_core import expect_error
import numpy as np
from jittor import init, Module
import numpy as np  # NOTE(review): duplicate of the import above; harmless


# Tests that exercise Jittor's ACL (Ascend) backend: each test runs the op
# with use_acl=1 and checks the result against numpy or the CPU backend.
@unittest.skipIf(not jt.compiler.has_acl, "No ACL found")
class TestACL(unittest.TestCase):

    @jt.flag_scope(use_acl=1)
    def test_array(self):
        # Round-trip an int array through the ACL device.
        print("use_acl", jt.flags.use_acl)
        a = jt.array([1,2,3])
        np.testing.assert_allclose(a.numpy(), [1,2,3])

    @jt.flag_scope(use_acl=1)
    def test_add(self):
        a = jt.array([1,2,3])
        b = a+a
        np.testing.assert_allclose(b.numpy(), [2,4,6])

    @jt.flag_scope(use_acl=1)
    def test_add_float(self):
        a = jt.array([1.0,2.0,3.0])
        b = a+a
        np.testing.assert_allclose(b.numpy(), [2,4,6])

    @jt.flag_scope(use_acl=1)
    def METHOD_NAME(self):
        # this test cannot pass because cast error
        x = np.random.rand(10)
        y = jt.float32(x)
        np.testing.assert_allclose(x, y.numpy())

    @jt.flag_scope(use_acl=1)
    def test_array_cast_half(self):
        # this test cannot pass because cast error
        x = np.random.rand(10).astype("float32")
        y = jt.float16(x)
        np.testing.assert_allclose(x.astype("float16"), y.numpy())

    @jt.flag_scope(use_acl=1)
    def test_rand(self):
        # Smoke test: random generation + scalar multiply on the device.
        a = jt.rand(10)
        b = a*10
        b.sync()
        print(b)

    def test_meminfo(self):
        jt.display_memory_info()

    @jt.flag_scope(use_acl=1)
    def test_conv(self):
        # Forward + backward conv2d on ACL, compared against the CPU backend.
        x = jt.rand(10, 3, 50, 50)
        w = jt.rand(4,3,3,3)
        # x = jt.rand(2, 2, 1, 1)
        # w = jt.rand(2,2,1,1)
        y = jt.nn.conv2d(x, w)
        y.sync(True)
        y1 = y.data
        mask = jt.rand_like(y)
        dx, dw = jt.grad((y*mask).sum(), [x, w])
        dx1, dw1 = dx.data, dw.data
        # dw, = jt.grad((y*mask).sum(), [w])
        # dw1 = dw.data
        with jt.flag_scope(use_acl=0):
            y = jt.nn.conv2d(x, w)
            y2 = y.data
            dx, dw = jt.grad((y*mask).sum(), [x, w])
            dx2, dw2 = dx.data, dw.data
            # dw, = jt.grad((y*mask).sum(), [w])
            # dw2 = dw.data
        np.testing.assert_allclose(y1, y2)
        np.testing.assert_allclose(dx1, dx2)
        np.testing.assert_allclose(dw1, dw2)

    @jt.flag_scope(use_acl=1)
    def test_matmul(self):
        # x = jt.rand(10, 3, 50, 50)
        # w = jt.rand(4,3,3,3)
        x = jt.rand(10,10)
        w = jt.rand(10,10)
        y = jt.matmul(x, w)
        ny = np.matmul(x.numpy(), w.numpy())
        # Loose tolerance: device matmul may accumulate in lower precision.
        np.testing.assert_allclose(y.numpy(), ny, atol=1e-3, rtol=1e-3)
        # y.sync(True)

    @jt.flag_scope(use_acl=1)
    def test_max(self):
        x = jt.rand(3,3)
        y = x.max(1).data
        ny = x.data.max(1)
        np.testing.assert_allclose(y, ny)

    @jt.flag_scope(use_acl=1)
    def test_sum(self):
        x = jt.rand(3,3).float16()
        print(x)
        # return
        y = x.sum(1).data
        print(y)
        print(x)
        ny = x.data.sum(1)
        np.testing.assert_allclose(y, ny)

    @jt.flag_scope(use_acl=1)
    def test_broadcast(self):
        x = jt.rand(3)
        # print(x)
        y = x.broadcast([3,3]).data
        ny = np.broadcast_arrays(x.data, y)[0]
        np.testing.assert_allclose(y, ny)
        print(x, y)
        # y = x.broadcast([3,3], dims=[1]).data
        y = jt.broadcast(x, shape=(3,3), dims=[1]).data
        with jt.flag_scope(use_acl=0):
            ny = jt.broadcast(x, shape=(3,3), dims=[1]).data
        # ny = np.broadcast_arrays(x.data, y)[0]
        np.testing.assert_allclose(y, ny)
        print(x, y)

    @jt.flag_scope(use_acl=1)
    def test_resnet(self):
        # End-to-end smoke test: a full resnet50 forward pass on ACL.
        from jittor.models import resnet50
        net = resnet50()
        x = jt.rand(2,3,224,224)
        y = net(x)
        y.sync()


def matmul(a, b):
    # Hand-rolled matmul via broadcast + reduce, used by Linear below.
    (n, m), k = a.shape, b.shape[-1]
    a = a.broadcast([n,m,k], dims=[2])
    b = b.broadcast([n,m,k], dims=[0])
    return (a*b).sum(dim=1)

class Linear(Module):
    # Minimal fully-connected layer built on the matmul() helper above.
    def __init__(self, in_features, out_features, bias=True):
        self.w = (jt.random((in_features, out_features))-0.5) / in_features**0.5
        self.b = jt.random((out_features,))-0.5 if bias else None
    def execute(self, x):
        x = matmul(x, self.w)
        if self.b is not None:
            return x+self.b
        return x

def relu(x): return jt.maximum(x, 0.0)
Relu = jt.make_module(relu)

class Model(Module):
    # Two-layer MLP used by the training smoke test below.
    def __init__(self, input_size):
        self.linear1 = Linear(input_size, 10)
        self.relu1 = Relu()
        self.linear2 = Linear(10, 1)
    def execute(self, x):
        x = self.linear1(x)
        x = self.relu1(x)
        return self.linear2(x)

@unittest.skipIf(not jt.compiler.has_acl, "No ACL found")
class TestExample(unittest.TestCase):
    @jt.flag_scope(use_acl=1)
    def test1(self):
        # Trains the tiny MLP to fit y = x^2 with manual SGD on ACL, checks
        # for memory leaks via liveness_info, then checks the final loss
        # against a small set of known-good values.
        np.random.seed(0)
        jt.set_seed(3)
        n = 1000
        batch_size = 50
        lr = 0.05

        def get_data(n):
            for i in range(n):
                x = np.random.rand(batch_size, 1).astype("float32")
                y = x*x
                yield jt.float32(x), jt.float32(y)

        model = Model(input_size=1)
        ps = model.parameters()

        for i,(x,y) in enumerate(get_data(n)):
            jt.sync_all(True)
            pred_y = model(x).name("pred_y")
            loss = ((pred_y - y).sqr()).name("loss")
            loss_mean = loss.mean()

            gs = jt.grad(loss_mean, ps)
            for p, g in zip(ps, gs):
                p -= g * lr

            if i>2:
                assert prev == jt.liveness_info(), f"memory leak {prev} {jt.liveness_info()}"
            prev = jt.liveness_info()
            print(f"step {i}, loss = {loss_mean.data.sum()} {jt.liveness_info()}")

        possible_results = [
            0.0009948202641680837,
            0.001381353591568768,
            0.00110957445576787,
        ]
        loss_mean = loss_mean.data
        assert any(abs(loss_mean - r) < 1e-6 for r in possible_results)

        jt.clean()

if __name__ == "__main__":
    unittest.main()
5,671
invalidate pages cache
# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Discovery and caching of the page scripts of a multipage app."""

import re
import threading
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Tuple, cast

from blinker import Signal

from streamlit.logger import get_logger
from streamlit.string_util import extract_leading_emoji
from streamlit.util import calc_md5

LOGGER = get_logger(__name__)


def open_python_file(filename):
    """Open a read-only Python file taking proper care of its encoding.

    In Python 3, we would like all files to be opened with utf-8 encoding.
    However, some authors like to specify PEP263 headers in their source
    files with their own encodings. In that case, we should respect the
    author's encoding.
    """
    import tokenize

    # tokenize.open (available since Python 3.2, i.e. on every interpreter
    # that can run this module) opens the file honoring a PEP 263 coding
    # header and falls back to utf-8 when no header is present. The
    # pre-3.2 fallback that used to live here was dead code.
    return tokenize.open(filename)


# Page filenames look like "03_My_Page.py": an optional numeric prefix,
# optional "_ -" separators, then the page name proper.
PAGE_FILENAME_REGEX = re.compile(r"([0-9]*)[_ -]*(.*)\.py")


def page_sort_key(script_path: Path) -> Tuple[float, str]:
    """Return the key used to order page scripts in the sidebar.

    Pages with a numeric filename prefix sort by that number (then by
    lowercased label); pages without one sort after all numbered pages
    (key ``float("inf")``), alphabetically by label.
    """
    matches = PAGE_FILENAME_REGEX.findall(script_path.name)

    # Failing this assert should only be possible if script_path isn't a
    # Python file, which should never happen (callers glob for "*.py").
    assert len(matches) > 0, f"{script_path} is not a Python file"

    [(number, label)] = matches
    label = label.lower()

    if number == "":
        return (float("inf"), label)

    return (float(number), label)


def page_icon_and_name(script_path: Path) -> Tuple[str, str]:
    """Compute the icon and name of a page from its script path.

    This is *almost* the page name displayed in the nav UI, but it has
    underscores instead of spaces. The reason we do this is because having
    spaces in URLs both looks bad and is hard to deal with due to the need
    to URL-encode them. To solve this, we only swap the underscores for
    spaces right before we render page names.
    """
    extraction = PAGE_FILENAME_REGEX.search(script_path.name)
    if extraction is None:
        return "", ""

    # This cast to Any+type annotation weirdness is done because
    # cast(re.Match[str], ...) explodes at runtime since Python interprets it
    # as an attempt to index into re.Match instead of as a type annotation.
    extraction: re.Match[str] = cast(Any, extraction)

    # Collapse runs of separators into single underscores; if the name part
    # is empty (file was all digits, e.g. "1.py"), fall back to the number.
    icon_and_name = re.sub(
        r"[_ ]+", "_", extraction.group(2)
    ).strip() or extraction.group(1)

    return extract_leading_emoji(icon_and_name)


# Guards _cached_pages; RLock so re-entrant reads during population are safe.
_pages_cache_lock = threading.RLock()
_cached_pages: Optional[Dict[str, Dict[str, str]]] = None
_on_pages_changed = Signal(doc="Emitted when the pages directory is changed")


def METHOD_NAME():
    """Drop the cached pages dict and notify registered listeners."""
    global _cached_pages

    LOGGER.debug("Pages directory changed")
    with _pages_cache_lock:
        _cached_pages = None

    # Fire the signal outside the lock so callbacks can call get_pages().
    _on_pages_changed.send()


def get_pages(main_script_path_str: str) -> Dict[str, Dict[str, str]]:
    """Return the app's pages, keyed by page-script md5 hash.

    Each value is a dict with "page_script_hash", "page_name", "icon" and
    "script_path" keys. The result is cached until the pages directory
    changes; population uses double-checked locking.
    """
    global _cached_pages

    # Avoid taking the lock if the pages cache hasn't been invalidated.
    pages = _cached_pages
    if pages is not None:
        return pages

    with _pages_cache_lock:
        # The cache may have been repopulated while we were waiting to grab
        # the lock.
        if _cached_pages is not None:
            return _cached_pages

        main_script_path = Path(main_script_path_str)
        main_page_icon, main_page_name = page_icon_and_name(main_script_path)
        main_page_script_hash = calc_md5(main_script_path_str)

        # NOTE: We include the page_script_hash in the dict even though it
        # is already used as the key because that occasionally makes things
        # easier for us when we need to iterate over pages.
        pages = {
            main_page_script_hash: {
                "page_script_hash": main_page_script_hash,
                "page_name": main_page_name,
                "icon": main_page_icon,
                "script_path": str(main_script_path.resolve()),
            }
        }

        pages_dir = main_script_path.parent / "pages"
        page_scripts = sorted(
            [
                f
                for f in pages_dir.glob("*.py")
                if not f.name.startswith(".") and f.name != "__init__.py"
            ],
            key=page_sort_key,
        )

        for script_path in page_scripts:
            script_path_str = str(script_path.resolve())
            page_icon, page_name = page_icon_and_name(script_path)
            page_script_hash = calc_md5(script_path_str)

            pages[page_script_hash] = {
                "page_script_hash": page_script_hash,
                "page_name": page_name,
                "icon": page_icon,
                "script_path": script_path_str,
            }

        _cached_pages = pages

        return pages


def register_pages_changed_callback(
    callback: Callable[[str], None],
):
    """Subscribe *callback* to pages-changed events.

    Returns a zero-argument function that disconnects the callback.
    """

    def disconnect():
        _on_pages_changed.disconnect(callback)

    # weak=False so that we have control of when the pages changed
    # callback is deregistered.
    _on_pages_changed.connect(callback, weak=False)

    return disconnect
5,672
weight prune
import os
import os.path as osp
import sys
import time
import argparse
from pdb import set_trace as st
import json
import random
import time
import pickle

import torch
import numpy as np
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchcontrib

from torchvision import transforms

from advertorch.attacks import LinfPGDAttack

from dataset.cub200 import CUB200Data
from dataset.mit67 import MIT67Data
from dataset.stanford_dog import SDog120Data
from dataset.stanford_40 import Stanford40Data
from dataset.flower102 import Flower102Data

from model.fe_resnet import feresnet18, feresnet50, feresnet101

from eval_robustness import advtest, myloss
from utils import *

from .nc_pruner import NCPruner


class NCWeightRankPruner(NCPruner):
    # Prunes individual conv weights by a combined rank: a per-(input,output)
    # channel-pair neuron-coverage rank minus the weight's own magnitude rank.
    def __init__(
        self,
        args,
        model,
        teacher,
        train_loader,
        test_loader,
    ):
        super(NCWeightRankPruner, self).__init__(
            args, model, teacher, train_loader, test_loader
        )

    def load_nc_info(self,):
        """Load the pickled neuron-coverage scores and the ordered list of
        conv-module names they were logged for (from args.nc_info_dir)."""
        path = osp.join(self.args.nc_info_dir, "accumulate_coverage.pkl")
        with open(path, "rb") as f:
            accumulate_coverage = pickle.load(f, )
        path = osp.join(self.args.nc_info_dir, "log_module_names.pkl")
        with open(path, "rb") as f:
            log_names = pickle.load(f, )
        return accumulate_coverage, log_names

    def METHOD_NAME(
        self,
        prune_ratio,
        random_prune=False,  # NOTE(review): accepted but never used here
    ):
        """Zero out the `prune_ratio` fraction of conv weights with the
        lowest combined (coverage-rank minus magnitude-rank) score."""
        model = self.model.cpu()
        total_weight = 0
        # Per conv layer: an array shaped like the weight that will hold the
        # combined rank of every individual weight (initialized to 0).
        layer_to_rank = {}
        for name, module in model.named_modules():
            if ( isinstance(module, nn.Conv2d) ):
                total_weight += module.weight.numel()
                layer_to_rank[name] = module.weight.data.clone().numpy()
                layer_to_rank[name].fill(0)

        accumulate_coverage, log_names = self.load_nc_info()

        # Score every (layer, input-channel, output-channel) pair as the sum
        # of its input and output coverage values.
        all_weight_coverage, adv_weight_coverage = [], []
        for layer_name, (input_coverage, output_coverage) in accumulate_coverage.items():
            input_dim, output_dim = len(input_coverage), len(output_coverage)
            for input_idx in range(input_dim):
                for output_idx in range(output_dim):
                    coverage_score = input_coverage[input_idx] + output_coverage[output_idx]
                    all_weight_coverage.append((coverage_score, (layer_name, input_idx, output_idx)))

        # prune_ratio = 0.05
        # Assign each channel pair a base rank (ascending coverage); the
        # scalar assignment below fills the whole h*w kernel slice, and the
        # running index advances by the kernel size so ranks don't collide.
        sorted_coverage = sorted(all_weight_coverage, key=lambda item: item[0])
        accumulate_index = 0
        for (coverage_score, pos) in sorted_coverage:
            layer_name, input_idx, output_idx = pos
            layer_to_rank[layer_name][output_idx, input_idx] = accumulate_index
            h, w = layer_to_rank[layer_name].shape[2:]
            accumulate_index += h*w

        # Rank every single conv weight by absolute magnitude.
        # NOTE(review): the four nested Python loops below are O(#weights)
        # with large constant factors; a vectorized argsort would be much
        # faster, kept as-is here.
        start = time.time()
        layer_idx = 0
        weight_list = []
        for name, module in model.named_modules():
            if ( isinstance(module, nn.Conv2d) ):
                weight_copy = module.weight.data.abs().clone().numpy()
                output_dim, input_dim, h, w = weight_copy.shape
                for output_idx in range(output_dim):
                    for input_idx in range(input_dim):
                        for h_idx in range(h):
                            for w_idx in range(w):
                                weight_score = weight_copy[output_idx, input_idx, h_idx, w_idx]
                                weight_list.append(
                                    (weight_score, (layer_idx, input_idx, output_idx, h_idx, w_idx))
                                )
                layer_idx += 1
        sorted_weight = sorted(weight_list, key=lambda item: item[0])
        end = time.time()
        weight_sort_time = end - start
        log = f"Sort weight time {weight_sort_time}"
        self.prune_record(log)

        # Subtract the magnitude rank: small-magnitude weights lose more,
        # so low coverage + low magnitude => lowest combined rank.
        for weight_rank, (weight_score, pos) in enumerate(sorted_weight):
            layer_idx, input_idx, output_idx, h_idx, w_idx = pos
            layer_name = log_names[layer_idx]
            layer_to_rank[layer_name][output_idx, input_idx, h_idx, w_idx] -= weight_rank

        # Flatten all combined ranks to find the global pruning threshold.
        start = time.time()
        nc_weight_ranks = []
        for layer_name in log_names:
            nc_weight_ranks.append(
                layer_to_rank[layer_name].flatten()
            )
        nc_weight_ranks = np.concatenate(nc_weight_ranks)
        nc_weight_ranks = np.sort(nc_weight_ranks)
        end = time.time()
        weight_sort_time = end - start
        log = f"Sort nc weight rank time {weight_sort_time}"
        self.prune_record(log)

        total = len(nc_weight_ranks)
        thre_index = int(total * prune_ratio)
        if thre_index == total:
            thre_index -= 1
        thre = nc_weight_ranks[thre_index]

        log = f"Pruning threshold: {thre:.4f}"
        self.prune_record(log)

        # Apply the mask: keep weights whose combined rank is strictly above
        # the threshold, zeroing the rest in place.
        pruned = 0
        for name, module in model.named_modules():
            if ( isinstance(module, nn.Conv2d) ):
                mask = layer_to_rank[name]
                mask = torch.Tensor(mask > thre)
                pruned = pruned + mask.numel() - torch.sum(mask)
                # np.random.shuffle(mask)
                module.weight.data.mul_(mask)
                remain_ratio = int(torch.sum(mask)) / mask.numel()
                log = (f"layer {name} \t total params: {mask.numel()} \t "
                       f"remaining params: {int(torch.sum(mask))}({remain_ratio:.2f})")
                self.prune_record(log)

        log = (f"Total conv params: {total_weight}, Pruned conv params: {pruned}, "
               f"Pruned ratio: {pruned/total_weight:.2f}")
        self.prune_record(log)
        self.model = model.cuda()
        self.check_param_num()

    def final_check_param_num(self):
        # Reopen the log file in append mode so check_param_num (which writes
        # via self.logger) can record the final counts, then close it.
        self.logger = open(self.log_path, "a")
        self.check_param_num()
        self.logger.close()
5,673
set parameters
# Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy
from spinn_utilities.overrides import overrides
from spinn_front_end_common.interface.ds import DataType
from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
from spynnaker.pyNN.data import SpynnakerDataView
from spynnaker.pyNN.exceptions import SpynnakerException
from .abstract_current_source import AbstractCurrentSource, CurrentSourceIDs


class ACSource(AbstractCurrentSource):
    """ AC current source (i.e. sine wave) turned on at "start" and off at
        "stop", given (y-)offset, amplitude, frequency and phase.
    """
    __slots__ = [
        "__start",
        "__stop",
        "__amplitude",
        "__offset",
        "__frequency",
        "__phase",
        "__parameters",
        "__parameter_types"]

    def __init__(self, start=0.0, stop=0.0, amplitude=0.0, offset=0.0,
                 frequency=0.0, phase=0.0):
        """
        :param float start: time (ms) at which the source switches on
        :param float stop: time (ms) at which the source switches off
        :param float amplitude: amplitude of the sine wave
        :param float offset: y-offset added to the sine wave
        :param float frequency: frequency in Hz (stored in radians/ms)
        :param float phase: phase in degrees (stored in radians)
        """
        # There's probably no need to actually store these as you can't
        # access them directly in pynn anyway; start/stop are converted from
        # milliseconds to simulation timesteps here.
        time_convert_ms = SpynnakerDataView.get_simulation_time_step_per_ms()
        self.__start = start * time_convert_ms
        self.__stop = stop * time_convert_ms
        self.__amplitude = amplitude
        self.__offset = offset
        self.__frequency = self._get_frequency(frequency)
        self.__phase = self._get_phase(phase)

        # Fixed on-machine representation of each parameter.
        self.__parameter_types = dict()
        self.__parameter_types['start'] = DataType.UINT32
        self.__parameter_types['stop'] = DataType.UINT32
        self.__parameter_types['amplitude'] = DataType.S1615
        self.__parameter_types['offset'] = DataType.S1615
        self.__parameter_types['frequency'] = DataType.S1615
        self.__parameter_types['phase'] = DataType.S1615

        self.__parameters = dict()
        self.__parameters['start'] = self.__start
        self.__parameters['stop'] = self.__stop
        self.__parameters['amplitude'] = self.__amplitude
        self.__parameters['offset'] = self.__offset
        self.__parameters['frequency'] = self.__frequency
        self.__parameters['phase'] = self.__phase

        super().__init__()

    @overrides(AbstractCurrentSource.METHOD_NAME)
    def METHOD_NAME(self, **parameters):
        """ Update one or more existing parameters of this source.

        :raises SpynnakerException: if a keyword does not name an existing
            parameter of this current source
        """
        for key, value in parameters.items():
            if key not in self.__parameters:
                raise SpynnakerException(f"{key} is not a parameter of {self}")
            # frequency/phase are stored in radian form, so convert on entry.
            # NOTE(review): unlike __init__, 'start'/'stop' set here are NOT
            # rescaled by the simulation timestep — confirm this is intended.
            if key == 'frequency':
                self.__parameters[key] = self._get_frequency(value)
            elif key == 'phase':
                self.__parameters[key] = self._get_phase(value)
            else:
                self.__parameters[key] = value

        # Parameters have been set, so if multi-run then it will have been
        # injected already; if not then it can just be ignored
        if self.app_vertex is not None:
            for m_vertex in self.app_vertex.machine_vertices:
                m_vertex.set_reload_required(True)

    @property
    @overrides(AbstractCurrentSource.get_parameters)
    def get_parameters(self):
        """ The dict of parameter name to (converted) value. """
        return self.__parameters

    @property
    @overrides(AbstractCurrentSource.get_parameter_types)
    def get_parameter_types(self):
        """ The dict of parameter name to on-machine data type. """
        return self.__parameter_types

    @property
    @overrides(AbstractCurrentSource.current_source_id)
    def current_source_id(self):
        """ Numeric identifier of this current-source kind. """
        return CurrentSourceIDs.AC_SOURCE.value

    @overrides(AbstractCurrentSource.get_sdram_usage_in_bytes)
    def get_sdram_usage_in_bytes(self):
        """ SDRAM needed on-machine: one word per parameter. """
        return len(self.__parameters) * BYTES_PER_WORD

    def _get_frequency(self, frequency):
        """ Convert frequency to radian-friendly value.

        :rtype: float
        """
        # convert frequency and phase into radians, remembering that
        # frequency is given in Hz but we are using ms for timesteps
        return (frequency * 2 * numpy.pi) / 1000.0

    def _get_phase(self, phase):
        """ Convert phase to radian-friendly value.

        :rtype: float
        """
        return phase * (numpy.pi / 180.0)
5,674
augment
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from botocore.exceptions import ClientError

import json

from c7n.actions import RemovePolicyBase
from c7n.filters import CrossAccountAccessFilter
from c7n.query import QueryResourceManager, TypeInfo
from c7n.manager import resources
from c7n.utils import get_retry, local_session, type_schema


@resources.register('glacier')
class Glacier(QueryResourceManager):
    """Resource manager for AWS Glacier vaults."""

    permissions = ('glacier:ListTagsForVault',)
    retry = staticmethod(get_retry(('Throttled',)))

    class resource_type(TypeInfo):
        service = 'glacier'
        enum_spec = ('list_vaults', 'VaultList', None)
        name = id = "VaultName"
        arn = "VaultARN"
        arn_type = 'vaults'
        universal_taggable = True

    def METHOD_NAME(self, resources):
        """Attach each vault's tags as a standard ``Tags`` list."""

        def process_tags(resource):
            client = local_session(self.session_factory).client('glacier')
            tag_dict = self.retry(
                client.list_tags_for_vault,
                vaultName=resource[self.get_model().name])['Tags']
            # Glacier returns tags as a plain mapping; normalize to the
            # usual AWS [{'Key': ..., 'Value': ...}] form.
            resource['Tags'] = [
                {'Key': k, 'Value': v} for k, v in tag_dict.items()]
            return resource

        with self.executor_factory(max_workers=2) as w:
            return list(w.map(process_tags, resources))


@Glacier.filter_registry.register('cross-account')
class GlacierCrossAccountAccessFilter(CrossAccountAccessFilter):
    """Filter to return all glacier vaults with cross account access permissions

    The whitelist parameter will omit the accounts that match from the return

    :example:

        .. code-block:: yaml

            policies:
              - name: check-glacier-cross-account
                resource: glacier
                filters:
                  - type: cross-account
                    whitelist:
                      - permitted-account-01
                      - permitted-account-02
    """
    permissions = ('glacier:GetVaultAccessPolicy',)

    def process(self, resources, event=None):
        def _augment(r):
            client = local_session(
                self.manager.session_factory).client('glacier')
            try:
                r['Policy'] = client.get_vault_access_policy(
                    vaultName=r['VaultName'])['policy']['Policy']
                return r
            except ClientError as e:
                # Vaults we can't read are skipped (returning None drops
                # them in the filter below); anything else is a real error.
                # The previous log line read r['FunctionName'], which does
                # not exist on vault resources and raised KeyError here.
                if e.response['Error']['Code'] == 'AccessDeniedException':
                    self.log.warning(
                        "Access denied getting policy glacier:%s",
                        r['VaultName'])
                    return None
                raise

        self.log.debug("fetching policy for %d glacier" % len(resources))
        with self.executor_factory(max_workers=3) as w:
            resources = list(filter(None, w.map(_augment, resources)))

        return super(GlacierCrossAccountAccessFilter, self).process(
            resources, event)


@Glacier.action_registry.register('remove-statements')
class RemovePolicyStatement(RemovePolicyBase):
    """Action to remove policy statements from Glacier

    :example:

        .. code-block:: yaml

            policies:
              - name: glacier-cross-account
                resource: glacier
                filters:
                  - type: cross-account
                actions:
                  - type: remove-statements
                    statement_ids: matched
    """

    permissions = ('glacier:SetVaultAccessPolicy', 'glacier:GetVaultAccessPolicy')

    def process(self, resources):
        results = []
        client = local_session(self.manager.session_factory).client('glacier')
        for r in resources:
            try:
                result = self.process_resource(client, r)
                if result is not None:
                    results.append(result)
            except Exception:
                # Best-effort across the set: log and continue with the
                # remaining vaults.
                self.log.exception(
                    "Error processing glacier:%s", r['VaultARN'])
        return results

    def process_resource(self, client, resource):
        """Strip matched statements from one vault's access policy.

        Returns a result dict when statements were removed, else None.
        """
        if 'Policy' not in resource:
            try:
                resource['Policy'] = client.get_vault_access_policy(
                    vaultName=resource['VaultName'])['policy']['Policy']
            except ClientError as e:
                if e.response['Error']['Code'] != "ResourceNotFoundException":
                    raise
                resource['Policy'] = None

        if not resource['Policy']:
            return

        p = json.loads(resource['Policy'])
        statements, found = self.process_policy(
            p, resource, CrossAccountAccessFilter.annotation_key)

        if not found:
            return

        # An empty statement list is an invalid policy document; delete the
        # whole policy instead.
        if not statements:
            client.delete_vault_access_policy(
                vaultName=resource['VaultName'])
        else:
            client.set_vault_access_policy(
                vaultName=resource['VaultName'],
                policy={'Policy': json.dumps(p)}
            )
        return {'Name': resource['VaultName'],
                'State': 'PolicyRemoved',
                'Statements': found}


@Glacier.action_registry.register('delete')
class GlacierVaultDelete(RemovePolicyBase):
    """Action to delete glacier vaults

    :example:

        .. code-block:: yaml

            policies:
              - name: glacier-vault-delete
                resource: aws.glacier
                filters:
                  - type: cross-account
                actions:
                  - type: delete
    """
    # NOTE(review): inherits RemovePolicyBase although it only deletes
    # vaults — kept as-is to preserve schema/interface behavior.

    schema = type_schema('delete')
    permissions = ('glacier:DeleteVault',)

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('glacier')
        for r in resources:
            # Already-deleted vaults are fine; ignore ResourceNotFound.
            self.manager.retry(client.delete_vault,
                               vaultName=r['VaultName'],
                               ignore_err_codes=(
                                   'ResourceNotFoundException',))
5,675
banner
# -*- coding: utf-8 -*-
"""
The Sage ZMQ Kernel

Version of the Jupyter kernel when running Sage inside the Jupyter
notebook or remote Jupyter sessions.
"""

# ***************************************************************************
#       Copyright (C) 2015 Volker Braun <vbraun.name@gmail.com>
#
#  Distributed under the terms of the GNU General Public License (GPL)
#  as published by the Free Software Foundation; either version 2 of
#  the License, or (at your option) any later version.
#                  https://www.gnu.org/licenses/
# ***************************************************************************

import sys

from ipykernel.ipkernel import IPythonKernel
from ipykernel.zmqshell import ZMQInteractiveShell
from traitlets import Type

from sage.env import SAGE_VERSION
from sage.repl.interpreter import SageNotebookInteractiveShell
from sage.repl.ipython_extension import SageJupyterCustomizations


class SageZMQInteractiveShell(SageNotebookInteractiveShell, ZMQInteractiveShell):
    # Combines Sage's notebook shell customizations with the ZMQ transport
    # shell used by Jupyter kernels; adds no behavior of its own.
    pass


class SageKernel(IPythonKernel):
    # Kernel metadata reported to Jupyter clients.
    implementation = 'sage'
    implementation_version = SAGE_VERSION
    # Tells IPythonKernel which shell class to instantiate.
    shell_class = Type(SageZMQInteractiveShell)

    def __init__(self, **kwds):
        """
        The Sage Jupyter Kernel

        INPUT:

        See the Jupyter documentation

        EXAMPLES::

            sage: from sage.repl.ipython_kernel.kernel import SageKernel
            sage: SageKernel.__new__(SageKernel)
            <sage.repl.ipython_kernel.kernel.SageKernel object at 0x...>
        """
        super().__init__(**kwds)
        # Install Sage's magics and display customizations on the shell.
        SageJupyterCustomizations(self.shell)

    @property
    def METHOD_NAME(self):
        r"""
        The Sage Banner

        The value of this property is displayed in the Jupyter notebook.

        OUTPUT:

        String.

        EXAMPLES::

            sage: from sage.repl.ipython_kernel.kernel import SageKernel
            sage: sk = SageKernel.__new__(SageKernel)
            sage: print(sk.banner)
            ┌...SageMath version...
        """
        from sage.misc.METHOD_NAME import banner_text
        return banner_text()

    @property
    def help_links(self):
        r"""
        Help in the Jupyter Notebook

        OUTPUT:

        See the Jupyter documentation.

        .. NOTE::

            Urls starting with "kernelspecs" are prepended by the
            browser with the appropriate path.

        EXAMPLES::

            sage: from sage.repl.ipython_kernel.kernel import SageKernel
            sage: sk = SageKernel.__new__(SageKernel)
            sage: sk.help_links
            [{'text': 'Sage Documentation',
              'url': 'kernelspecs/sagemath/doc/html/en/index.html'},
             ...]
        """
        from sage.repl.ipython_kernel.install import SageKernelSpec
        identifier = SageKernelSpec.identifier()

        def kernel_url(x):
            # Relative URL into this kernel's installed kernelspec directory.
            return 'kernelspecs/{0}/{1}'.format(identifier, x)

        return [
            {
                'text': 'Sage Documentation',
                'url': kernel_url('doc/html/en/index.html'),
            },
            {
                'text': 'Tutorial',
                'url': kernel_url('doc/html/en/tutorial/index.html'),
            },
            {
                'text': 'Thematic Tutorials',
                'url': kernel_url('doc/html/en/thematic_tutorials/index.html'),
            },
            {
                'text': 'FAQs',
                'url': kernel_url('doc/html/en/faq/index.html'),
            },
            {
                'text': 'PREP Tutorials',
                'url': kernel_url('doc/html/en/prep/index.html'),
            },
            {
                'text': 'Reference',
                'url': kernel_url('doc/html/en/reference/index.html'),
            },
            {
                'text': "Developer's Guide",
                'url': kernel_url('doc/html/en/developer/index.html'),
            },
            {
                'text': "Python",
                'url': "http://docs.python.org/%i.%i" % sys.version_info[:2],
            },
            {
                'text': "IPython",
                'url': "http://ipython.org/documentation.html",
            },
            {
                'text': 'Singular',
                'url': 'http://www.singular.uni-kl.de/Manual/latest/index.htm',
            },
            {
                'text': 'GAP',
                'url': 'http://gap-system.org/Manuals/doc/ref/chap0.html',
            },
            {
                'text': "NumPy",
                'url': "http://docs.scipy.org/doc/numpy/reference/",
            },
            {
                'text': "SciPy",
                'url': "http://docs.scipy.org/doc/scipy/reference/",
            },
            {
                'text': "SymPy",
                'url': 'http://docs.sympy.org/latest/index.html',
            },
            {
                'text': "Matplotlib",
                'url': "https://matplotlib.org/contents.html",
            },
            {
                'text': "Markdown",
                'url': "http://help.github.com/articles/github-flavored-markdown",
            },
        ]

    def pre_handler_hook(self):
        """
        Restore the signal handlers to their default values at Sage
        startup, saving the old handler at the ``saved_sigint_handler``
        attribute.

        This is needed because Jupyter needs to change the ``SIGINT``
        handler. See :trac:`19135`.

        TESTS::

            sage: from sage.repl.ipython_kernel.kernel import SageKernel
            sage: k = SageKernel.__new__(SageKernel)
            sage: k.pre_handler_hook()
            sage: k.saved_sigint_handler
            <cyfunction python_check_interrupt at ...>
        """
        from cysignals import init_cysignals
        self.saved_sigint_handler = init_cysignals()
5,676
test gridspec stretch with replacement pane
# Tests for panel's GridSpec / GridBox layouts.
# NOTE(review): `document` and `comm` are pytest fixtures supplied by panel's
# test harness; they are not defined in this module.
import pytest

from bokeh.models import Div

from panel.depends import depends
from panel.layout import GridBox, GridSpec, Spacer
from panel.widgets import IntSlider


def test_gridspec_cleanup(document, comm):
    # A pane placed in a GridSpec registers its model under the root ref;
    # _cleanup must drop the ref from both the layout and the child.
    spacer = Spacer()
    gspec = GridSpec()

    gspec[0, 0] = spacer

    model = gspec.get_root(document, comm)

    ref = model.ref['id']
    assert ref in gspec._models
    assert ref in spacer._models

    gspec._cleanup(model)
    assert ref not in gspec._models
    assert ref not in spacer._models


def test_gridspec_integer_setitem():
    # Integer indices are stored as a (row, col, rowspan=1, colspan=1) key.
    div = Div()
    gspec = GridSpec()
    gspec[0, 0] = div
    assert list(gspec.objects) == [(0, 0, 1, 1)]


def test_gridspec_clone():
    # clone() must copy both the placed objects and all parameter values.
    div = Div()
    gspec = GridSpec()
    gspec[0, 0] = div
    clone = gspec.clone()
    assert gspec.objects == clone.objects
    assert gspec.param.values() == clone.param.values()


def test_gridspec_slice_setitem():
    # A full-row slice is stored with None marking the open-ended extent.
    div = Div()
    gspec = GridSpec()
    gspec[0, :] = div
    assert list(gspec.objects) == [(0, None, 1, None)]


def test_gridspec_setitem_int_overlap():
    # mode='error' turns overlapping assignments into IndexError.
    div = Div()
    gspec = GridSpec(mode='error')
    gspec[0, 0] = div
    with pytest.raises(IndexError):
        gspec[0, 0] = 'String'


def test_gridspec_setitem_slice_overlap():
    # A cell inside an already-assigned slice also counts as an overlap.
    div = Div()
    gspec = GridSpec(mode='error')
    gspec[0, :] = div
    with pytest.raises(IndexError):
        gspec[0, 1] = div


def test_gridspec_setitem_cell_override():
    # Default mode silently replaces the pane occupying the same cell.
    div = Div()
    div2 = Div()
    gspec = GridSpec()
    gspec[0, 0] = div
    gspec[0, 0] = div2
    assert (0, 0, 1, 1) in gspec.objects
    assert gspec.objects[(0, 0, 1, 1)].object is div2


def test_gridspec_setitem_span_override():
    # Assigning a cell within an existing span replaces the spanning pane.
    div = Div()
    div2 = Div()
    gspec = GridSpec()
    gspec[0, :] = div
    gspec[0, 0] = div2
    assert (0, 0, 1, 1) in gspec.objects
    assert gspec.objects[(0, 0, 1, 1)].object is div2


def test_gridspec_fixed_with_int_setitem(document, comm):
    # Fixed 800x500 grid with 2x2 cells -> each child gets 400x250.
    div1 = Div()
    div2 = Div()
    gspec = GridSpec(width=800, height=500)

    gspec[0, 0] = div1
    gspec[1, 1] = div2

    model = gspec.get_root(document, comm=comm)
    assert model.children == [(div1, 0, 0, 1, 1), (div2, 1, 1, 1, 1)]
    assert div1.width == 400
    assert div1.height == 250
    assert div2.width == 400
    assert div2.height == 250


def test_gridspec_fixed_with_slice_setitem(document, comm):
    # A two-column span doubles the child's width (2 * 300 = 600).
    div1 = Div()
    div2 = Div()
    gspec = GridSpec(width=900, height=500)

    gspec[0, 0:2] = div1
    gspec[1, 2] = div2

    model = gspec.get_root(document, comm=comm)
    assert model.children == [(div1, 0, 0, 1, 2), (div2, 1, 2, 1, 1)]
    assert div1.width == 600
    assert div1.height == 250
    assert div2.width == 300
    assert div2.height == 250


def test_gridspec_fixed_with_upper_partial_slice_setitem(document, comm):
    # [:2] is equivalent to [0:2] — same sizing as the explicit slice test.
    div1 = Div()
    div2 = Div()
    gspec = GridSpec(width=900, height=500)

    gspec[0, :2] = div1
    gspec[1, 2] = div2

    model = gspec.get_root(document, comm=comm)
    assert model.children == [(div1, 0, 0, 1, 2), (div2, 1, 2, 1, 1)]
    assert div1.width == 600
    assert div1.height == 250
    assert div2.width == 300
    assert div2.height == 250


def test_gridspec_fixed_with_lower_partial_slice_setitem(document, comm):
    # [1:] spans columns 1-2; child starts at column 1.
    div1 = Div()
    div2 = Div()
    gspec = GridSpec(width=900, height=500)

    gspec[0, 1:] = div1
    gspec[1, 2] = div2

    model = gspec.get_root(document, comm=comm)
    assert model.children == [(div1, 0, 1, 1, 2), (div2, 1, 2, 1, 1)]
    assert div1.width == 600
    assert div1.height == 250
    assert div2.width == 300
    assert div2.height == 250


def test_gridspec_fixed_with_empty_slice_setitem(document, comm):
    # [:] spans all three columns -> child takes the full 900px width.
    div1 = Div()
    div2 = Div()
    gspec = GridSpec(width=900, height=500)

    gspec[0, :] = div1
    gspec[1, 2] = div2

    model = gspec.get_root(document, comm=comm)
    assert model.children == [(div1, 0, 0, 1, 3), (div2, 1, 2, 1, 1)]
    assert div1.width == 900
    assert div1.height == 250
    assert div2.width == 300
    assert div2.height == 250


def test_gridspec_stretch_with_int_setitem(document, comm):
    # With stretch sizing the grid propagates sizing_mode instead of pixels.
    div1 = Div()
    div2 = Div()
    gspec = GridSpec(sizing_mode='stretch_both')

    gspec[0, 0] = div1
    gspec[1, 1] = div2

    model = gspec.get_root(document, comm=comm)
    assert model.children == [(div1, 0, 0, 1, 1), (div2, 1, 1, 1, 1)]
    assert div1.sizing_mode == 'stretch_both'
    assert div2.sizing_mode == 'stretch_both'


def test_gridspec_stretch_with_slice_setitem(document, comm):
    # Spans behave identically under stretch sizing.
    div1 = Div()
    div2 = Div()
    gspec = GridSpec(sizing_mode='stretch_both')

    gspec[0, 0:2] = div1
    gspec[1, 2] = div2

    model = gspec.get_root(document, comm=comm)
    assert model.children == [(div1, 0, 0, 1, 2), (div2, 1, 2, 1, 1)]
    assert div1.sizing_mode == 'stretch_both'
    assert div2.sizing_mode == 'stretch_both'


def test_gridspec_fixed_with_replacement_pane(document, comm):
    # A @depends callable placed in the grid is wrapped in a replacement
    # row; updating the widget swaps the row's child while keeping sizing.
    slider = IntSlider(start=0, end=2)

    @depends(slider)
    def div(value):
        return Div(text=str(value))

    gspec = GridSpec(width=600, height=600)

    gspec[0, 0:2] = Div()
    gspec[1, 2] = div

    model = gspec.get_root(document, comm=comm)
    ((div1, _, _, _, _), (row, _, _, _, _)) = model.children
    div2 = row.children[0]
    assert div1.width == 400
    assert div1.height == 300
    assert div2.width == 200
    assert div2.height == 300

    slider.value = 1
    assert row.children[0] is not div2
    assert row.children[0].width == 200
    assert row.children[0].height == 300


def METHOD_NAME(document, comm):
    # Same replacement-pane scenario under stretch_width sizing: the new
    # child must inherit both sizing_mode and the computed row height.
    slider = IntSlider(start=0, end=2)

    @depends(slider)
    def div(value):
        return Div(text=str(value))

    gspec = GridSpec(sizing_mode='stretch_width', height=600)

    gspec[0, 0:2] = Div()
    gspec[1, 2] = div

    model = gspec.get_root(document, comm=comm)
    ((div1, _, _, _, _), (row, _, _, _, _)) = model.children
    div2 = row.children[0]
    assert div1.sizing_mode == 'stretch_width'
    assert div1.height == 300
    assert div2.sizing_mode == 'stretch_width'
    assert div2.height == 300

    slider.value = 1
    assert row.children[0] is not div2
    assert row.children[0].sizing_mode == 'stretch_width'
    assert row.children[0].height == 300


def test_gridbox_ncols(document, comm):
    # 8 items at ncols=3 -> 3 full rows, last row has 2 items.
    grid_box = GridBox(Div(), Div(), Div(), Div(), Div(), Div(), Div(), Div(), ncols=3)

    model = grid_box.get_root(document, comm=comm)
    assert len(model.children) == 8
    coords = [
        (0, 0, 1, 1), (0, 1, 1, 1), (0, 2, 1, 1),
        (1, 0, 1, 1), (1, 1, 1, 1), (1, 2, 1, 1),
        (2, 0, 1, 1), (2, 1, 1, 1)
    ]
    for child, coord in zip(model.children, coords):
        assert child[1:] == coord


def test_gridbox_nrows(document, comm):
    # 8 items at nrows=2 -> 2 rows of 4 columns.
    grid_box = GridBox(Div(), Div(), Div(), Div(), Div(), Div(), Div(), Div(), nrows=2)

    model = grid_box.get_root(document, comm=comm)
    assert len(model.children) == 8
    coords = [
        (0, 0, 1, 1), (0, 1, 1, 1), (0, 2, 1, 1), (0, 3, 1, 1),
        (1, 0, 1, 1), (1, 1, 1, 1), (1, 2, 1, 1), (1, 3, 1, 1)
    ]
    for child, coord in zip(model.children, coords):
        assert child[1:] == coord


def test_gridspec_fixed_ncols():
    # Assigning full-row slices must not raise when ncols is fixed.
    grid = GridSpec(ncols=3)
    for index in range(5):
        grid[index, :] = "Hello World"


def test_gridspec_fixed_nrows():
    # Assigning full-column slices must not raise when nrows is fixed.
    grid = GridSpec(nrows=3)
    for index in range(5):
        grid[:, index] = "Hello World"
5,677
build fake yaml2
"""Tests for neural_compressor quantization.""" import importlib import os import shutil import unittest import numpy as np import yaml def build_fake_yaml(): fake_yaml = """ model: name: fake_yaml framework: tensorflow inputs: x outputs: identity quantization: recipes: fast_bias_correction: True calibration: sampling_size: 10 device: cpu evaluation: accuracy: metric: topk: 1 tuning: accuracy_criterion: relative: 0.01 workspace: path: saved """ y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) with open("fake_yaml.yaml", "w", encoding="utf-8") as f: yaml.dump(y, f) f.close() def METHOD_NAME(): fake_yaml = """ model: name: fake_yaml framework: tensorflow inputs: x outputs: identity quantization: recipes: weight_correction: True calibration: sampling_size: 10 device: cpu evaluation: accuracy: metric: topk: 1 tuning: accuracy_criterion: relative: 0.01 workspace: path: saved resume: ./saved/history.snapshot """ y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) with open("fake_yaml2.yaml", "w", encoding="utf-8") as f: yaml.dump(y, f) f.close() def build_fake_model(): import tensorflow as tf try: graph = tf.Graph() graph_def = tf.GraphDef() with tf.Session() as sess: x = tf.placeholder(tf.float32, shape=(1, 3, 3, 1), name="x") y = tf.constant(np.random.random((2, 2, 1, 1)), dtype=tf.float32, name="y") relu_0 = tf.nn.relu(x, name="relu") conv = tf.nn.conv2d(input=relu_0, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="conv") bias = tf.Variable(tf.ones([1], tf.float32)) conv_add = tf.nn.bias_add(conv, bias, name="bias_add") relu = tf.nn.relu(conv_add) op = tf.identity(relu, name="identity") sess.run(tf.global_variables_initializer()) from tensorflow.compat.v1.graph_util import convert_variables_to_constants constant_graph = convert_variables_to_constants(sess, sess.graph_def, ["identity"]) graph_def.ParseFromString(constant_graph.SerializeToString()) with graph.as_default(): tf.import_graph_def(graph_def, name="") except: import tensorflow as tf graph = tf.Graph() 
graph_def = tf.compat.v1.GraphDef() with tf.compat.v1.Session() as sess: x = tf.compat.v1.placeholder(tf.float32, shape=(1, 3, 3, 1), name="x") y = tf.compat.v1.constant(np.random.random((2, 2, 1, 1)), dtype=tf.float32, name="y") relu_0 = tf.nn.relu(x, name="relu") conv = tf.nn.conv2d(input=relu_0, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="conv") bias = tf.Variable(tf.ones([1], tf.float32)) conv_add = tf.nn.bias_add(conv, bias, name="bias_add") relu = tf.nn.relu(conv_add) op = tf.identity(relu, name="identity") sess.run(tf.compat.v1.global_variables_initializer()) from tensorflow.compat.v1.graph_util import convert_variables_to_constants constant_graph = convert_variables_to_constants(sess, sess.graph_def, ["identity"]) graph_def.ParseFromString(constant_graph.SerializeToString()) with graph.as_default(): tf.import_graph_def(graph_def, name="") return graph class TestQuantization(unittest.TestCase): @classmethod def setUpClass(self): self.constant_graph = build_fake_model() build_fake_yaml() METHOD_NAME() @classmethod def tearDownClass(self): os.remove("fake_yaml.yaml") os.remove("fake_yaml2.yaml") shutil.rmtree("./saved", ignore_errors=True) def test_fast_bias_correction(self): from neural_compressor.experimental import Quantization, common quantizer = Quantization("fake_yaml.yaml") dataset = quantizer.dataset("dummy", shape=(100, 3, 3, 1), label=True) quantizer.eval_dataloader = common.DataLoader(dataset) quantizer.calib_dataloader = common.DataLoader(dataset) quantizer.model = self.constant_graph output_graph = quantizer.fit() def test_weight_correction(self): from neural_compressor.experimental import Quantization, common quantizer = Quantization("fake_yaml2.yaml") dataset = quantizer.dataset("dummy", shape=(100, 3, 3, 1), label=True) quantizer.eval_dataloader = common.DataLoader(dataset) quantizer.calib_dataloader = common.DataLoader(dataset) quantizer.model = self.constant_graph output_graph = quantizer.fit() if __name__ == "__main__": 
unittest.main()
5,678
test unsigned max
""" Test functions for limits module. """ import warnings import numpy as np from numpy.core import finfo, iinfo from numpy import half, single, double, longdouble from numpy.testing import assert_equal, assert_, assert_raises from numpy.core.getlimits import _discovered_machar, _float_ma ################################################## class TestPythonFloat: def test_singleton(self): ftype = finfo(float) ftype2 = finfo(float) assert_equal(id(ftype), id(ftype2)) class TestHalf: def test_singleton(self): ftype = finfo(half) ftype2 = finfo(half) assert_equal(id(ftype), id(ftype2)) class TestSingle: def test_singleton(self): ftype = finfo(single) ftype2 = finfo(single) assert_equal(id(ftype), id(ftype2)) class TestDouble: def test_singleton(self): ftype = finfo(double) ftype2 = finfo(double) assert_equal(id(ftype), id(ftype2)) class TestLongdouble: def test_singleton(self): ftype = finfo(longdouble) ftype2 = finfo(longdouble) assert_equal(id(ftype), id(ftype2)) class TestFinfo: def test_basic(self): dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'], [np.float16, np.float32, np.float64, np.complex64, np.complex128])) for dt1, dt2 in dts: for attr in ('bits', 'eps', 'epsneg', 'iexp', 'machep', 'max', 'maxexp', 'min', 'minexp', 'negep', 'nexp', 'nmant', 'precision', 'resolution', 'tiny', 'smallest_normal', 'smallest_subnormal'): assert_equal(getattr(finfo(dt1), attr), getattr(finfo(dt2), attr), attr) assert_raises(ValueError, finfo, 'i4') class TestIinfo: def test_basic(self): dts = list(zip(['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'], [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64])) for dt1, dt2 in dts: for attr in ('bits', 'min', 'max'): assert_equal(getattr(iinfo(dt1), attr), getattr(iinfo(dt2), attr), attr) assert_raises(ValueError, iinfo, 'f4') def METHOD_NAME(self): types = np.sctypes['uint'] for T in types: with np.errstate(over="ignore"): max_calculated = T(0) - T(1) assert_equal(iinfo(T).max, max_calculated) class 
TestRepr: def test_iinfo_repr(self): expected = "iinfo(min=-32768, max=32767, dtype=int16)" assert_equal(repr(np.iinfo(np.int16)), expected) def test_finfo_repr(self): expected = "finfo(resolution=1e-06, min=-3.4028235e+38," + \ " max=3.4028235e+38, dtype=float32)" assert_equal(repr(np.finfo(np.float32)), expected) def test_instances(): iinfo(10) finfo(3.0) def assert_ma_equal(discovered, ma_like): # Check MachAr-like objects same as calculated MachAr instances for key, value in discovered.__dict__.items(): assert_equal(value, getattr(ma_like, key)) if hasattr(value, 'shape'): assert_equal(value.shape, getattr(ma_like, key).shape) assert_equal(value.dtype, getattr(ma_like, key).dtype) def test_known_types(): # Test we are correctly compiling parameters for known types for ftype, ma_like in ((np.float16, _float_ma[16]), (np.float32, _float_ma[32]), (np.float64, _float_ma[64])): assert_ma_equal(_discovered_machar(ftype), ma_like) # Suppress warning for broken discovery of double double on PPC with np.errstate(all='ignore'): ld_ma = _discovered_machar(np.longdouble) bytes = np.dtype(np.longdouble).itemsize if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16): # 80-bit extended precision assert_ma_equal(ld_ma, _float_ma[80]) elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: # IEE 754 128-bit assert_ma_equal(ld_ma, _float_ma[128]) def test_subnormal_warning(): """Test that the subnormal is zero warning is not being raised.""" with np.errstate(all='ignore'): ld_ma = _discovered_machar(np.longdouble) bytes = np.dtype(np.longdouble).itemsize with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16): # 80-bit extended precision ld_ma.smallest_subnormal assert len(w) == 0 elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: # IEE 754 128-bit ld_ma.smallest_subnormal assert len(w) == 0 else: # Double double ld_ma.smallest_subnormal # This test may fail on 
some platforms assert len(w) == 0 def test_plausible_finfo(): # Assert that finfo returns reasonable results for all types for ftype in np.sctypes['float'] + np.sctypes['complex']: info = np.finfo(ftype) assert_(info.nmant > 1) assert_(info.minexp < -1) assert_(info.maxexp > 1)
5,679
wait until available
#!/usr/bin/env python3
# Spins up four nodes, and alternates [test1, test2] and [test3, test4] as block producers every epoch
# Makes sure that before the epoch switch each block is signed by all four
import sys, time, base58, random, datetime
import pathlib

sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib'))
from cluster import start_cluster
from configured_logger import logger
from transaction import sign_staking_tx

EPOCH_LENGTH = 30
# Run for roughly 7.5 epochs worth of blocks.
HEIGHT_GOAL = int(EPOCH_LENGTH * 7.5)
TIMEOUT = HEIGHT_GOAL * 3

config = None
# Four validating nodes plus one observer (node 2 tracks shard 0); state
# sync and snapshots enabled everywhere with a short sync timeout.
nodes = start_cluster(
    2, 2, 1, config,
    [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 40]],
    {
        0: {
            "view_client_throttle_period": {
                "secs": 0,
                "nanos": 0
            },
            "state_sync_enabled": True,
            "store.state_snapshot_enabled": True,
            "consensus": {
                "state_sync_timeout": {
                    "secs": 0,
                    "nanos": 500000000
                }
            }
        },
        1: {
            "view_client_throttle_period": {
                "secs": 0,
                "nanos": 0
            },
            "state_sync_enabled": True,
            "store.state_snapshot_enabled": True,
            "consensus": {
                "state_sync_timeout": {
                    "secs": 0,
                    "nanos": 500000000
                }
            }
        },
        2: {
            "tracked_shards": [0],
            "view_client_throttle_period": {
                "secs": 0,
                "nanos": 0
            },
            "state_sync_enabled": True,
            "store.state_snapshot_enabled": True,
            "consensus": {
                "state_sync_timeout": {
                    "secs": 0,
                    "nanos": 500000000
                }
            }
        },
        3: {
            "view_client_throttle_period": {
                "secs": 0,
                "nanos": 0
            },
            "state_sync_enabled": True,
            "store.state_snapshot_enabled": True,
            "consensus": {
                "state_sync_timeout": {
                    "secs": 0,
                    "nanos": 500000000
                }
            }
        }
    })

started = time.time()


def get_validators():
    # Current validator account ids as reported by node 0.
    return set([x['account_id'] for x in nodes[0].get_status()['validators']])


def get_stakes():
    # Locked balances of test0..test2 queried through the observer node.
    return [
        int(nodes[2].get_account("test%s" % i)['result']['locked'])
        for i in range(3)
    ]


seen_epochs = set()
# Validator-set halves that alternate every epoch via staking/unstaking.
cur_vals = [0, 1]
next_vals = [2, 3]

height_to_num_approvals = {}

largest_height = 0
next_nonce = 1
epoch_switch_height = -2

blocks_by_height = {}


def METHOD_NAME(get_fn):
    # Poll `get_fn` until its response carries a 'result' field.
    while True:
        res = get_fn()
        logger.info(f"res: {res}")
        if 'result' in res:
            return res
        time.sleep(0.1)


for largest_height in range(2, HEIGHT_GOAL + 1):
    assert time.time() - started < TIMEOUT
    block = METHOD_NAME(
        lambda: nodes[0].get_block_by_height(largest_height, timeout=5))
    assert block is not None

    hash_ = block['result']['header']['hash']
    epoch_id = block['result']['header']['epoch_id']
    height = block['result']['header']['height']
    assert height == largest_height
    blocks_by_height[height] = block

    logger.info("... %s" % height)
    logger.info(block['result']['header']['approvals'])

    # we expect no skipped heights
    height_to_num_approvals[height] = len(
        block['result']['header']['approvals'])
    logger.info(
        f"Added height_to_num_approvals {height}={len(block['result']['header']['approvals'])}"
    )

    # Shortly after each epoch switch, flip the stakes: the incoming half
    # unstakes (stake 0) and the outgoing half stakes, so the producer set
    # alternates again next epoch. Txs are broadcast to all four nodes.
    if height > epoch_switch_height + 2:
        prev_hash = None
        if (height - 1) in blocks_by_height:
            prev_hash = blocks_by_height[height - 1]['result']['header']['hash']
        if prev_hash:
            for val_ord in next_vals:
                tx = sign_staking_tx(nodes[val_ord].signer_key,
                                     nodes[val_ord].validator_key, 0,
                                     next_nonce,
                                     base58.b58decode(prev_hash.encode('utf8')))
                for target in range(0, 4):
                    nodes[target].send_tx(tx)
                next_nonce += 1

            for val_ord in cur_vals:
                tx = sign_staking_tx(nodes[val_ord].signer_key,
                                     nodes[val_ord].validator_key,
                                     50000000000000000000000000000000,
                                     next_nonce,
                                     base58.b58decode(prev_hash.encode('utf8')))
                for target in range(0, 4):
                    nodes[target].send_tx(tx)
                next_nonce += 1

    if epoch_id not in seen_epochs:
        # First block of a new epoch.
        seen_epochs.add(epoch_id)
        if height - 1 in blocks_by_height:
            prev_block = blocks_by_height[height - 1]
            assert prev_block['result']['header']['epoch_id'] != block[
                'result']['header']['epoch_id']
        logger.info("EPOCH %s, VALS %s" % (epoch_id, get_validators()))
        if len(seen_epochs) > 2:  # the first two epochs share the validator set
            # Expect 2 approvals mid-epoch and 4 on the two blocks just
            # before the switch (all four validators sign those).
            logger.info(
                f"Checking height_to_num_approvals {height}, {height_to_num_approvals}"
            )
            assert height_to_num_approvals[height] == 2
            has_prev = height - 1 in height_to_num_approvals
            has_two_ago = height - 2 in height_to_num_approvals
            if has_prev:
                assert height_to_num_approvals[height - 1] == 4
            if has_two_ago:
                assert height_to_num_approvals[height - 2] == 4
            if has_prev and has_two_ago:
                for i in range(3, EPOCH_LENGTH):
                    if height - i in height_to_num_approvals:
                        assert height_to_num_approvals[height - i] == 2
        else:
            for i in range(height):
                if i in height_to_num_approvals:
                    assert height_to_num_approvals[i] == 2, (
                        i, height_to_num_approvals[i], height_to_num_approvals)

        cur_vals, next_vals = next_vals, cur_vals
        epoch_switch_height = height

# At least four distinct epochs must have been observed by the goal height.
assert len(seen_epochs) > 3
5,680
parse sdf to dataframe
""" Utiles for data parsing""" import os import warnings import numpy as np import pandas as pd import datamol as dm from functools import partial from copy import copy import fsspec from loguru import logger from rdkit import Chem from rdkit.Chem.Descriptors import ExactMolWt from graphium.utils.tensor import parse_valid_args, arg_in_func def read_file(filepath, as_ext=None, **kwargs): r""" Allow to read different file format and parse them into a MolecularDataFrame. Supported formats are: * csv (.csv, .smile, .smiles, .tsv) * txt (.txt) * xls (.xls, .xlsx, .xlsm, .xls*) * sdf (.sdf) * pkl (.pkl) Arguments ----------- filepath: str The full path and name of the file to read. It also supports the s3 url path. as_ext: str, Optional The file extension used to read the file. If None, the extension is deduced from the extension of the file. Otherwise, no matter the file extension, the file will be read according to the specified ``as_ext``. (Default=None) **kwargs: All the optional parameters required for the desired file reader. TODO: unit test to make sure it works well with all extensions Returns --------- df: pandas.DataFrame The ``pandas.DataFrame`` containing the parsed data """ # Get the file extension if as_ext is None: file_ext = os.path.splitext(filepath)[-1].lower()[1:] else: file_ext = as_ext if not isinstance(file_ext, str): raise TypeError("`file_type` must be a `str`. 
Provided: {}".format(file_ext)) open_mode = "r" # Read the file according to the right extension if file_ext in ["csv", "smile", "smiles", "smi", "tsv"]: file_reader = pd.read_csv elif file_ext == "txt": file_reader = pd.read_table elif file_ext[0:3] == "xls": open_mode = "rb" file_reader = partial(pd.read_excel, engine="openpyxl") elif file_ext == "sdf": file_reader = METHOD_NAME elif file_ext == "pkl": open_mode = "rb" file_reader = pd.read_pickle else: raise 'File extension "{}" not supported'.format(file_ext) kwargs = parse_valid_args(fn=file_reader, param_dict=kwargs) if file_ext[0:3] not in ["sdf", "xls"]: with file_opener(filepath, open_mode) as file_in: data = file_reader(file_in, **kwargs) else: data = file_reader(filepath, **kwargs) return data def METHOD_NAME(sdf_path, as_cxsmiles=True, skiprows=None): r""" Allows to read an SDF file containing molecular informations, convert it to a pandas DataFrame and convert the molecules to SMILES. It also lists a warning of all the molecules that couldn't be read. Arguments ----------- sdf_path: str The full path and name of the sdf file to read as_cxsmiles: bool, optional Whether to use the CXSMILES notation, which preserves atomic coordinates, stereocenters, and much more. See `https://dl.chemaxon.com/marvin-archive/latest/help/formats/cxsmiles-doc.html` (Default = True) skiprows: int, list The rows to skip from dataset. The enumerate index starts from 1 insted of 0. (Default = None) """ # read the SDF file # locally or from s3 data = dm.read_sdf(sdf_path) # For each molecule in the SDF file, read all the properties and add it to a list of dict. # Also count the number of molecules that cannot be read. 
data_list = [] count_none = 0 if skiprows is not None: if isinstance(skiprows, int): skiprows = range(0, skiprows - 1) skiprows = np.array(skiprows) - 1 for idx, mol in enumerate(data): if (skiprows is not None) and (idx in skiprows): continue if (mol is not None) and (ExactMolWt(mol) > 0): mol_dict = mol.GetPropsAsDict() data_list.append(mol_dict) if as_cxsmiles: smiles = Chem.rdmolfiles.MolToCXSmiles(mol, canonical=True) else: smiles = dm.to_smiles(mol, canonical=True) data_list[-1]["SMILES"] = smiles else: count_none += 1 logger.info(f"Could not read molecule # {idx}") # Display a message or warning after the SDF is done parsing if count_none == 0: logger.info("Successfully read the SDF file without error: {}".format(sdf_path)) else: warnings.warn( ( 'Error reading {} molecules from the "{}" file.\ {} molecules read successfully.' ).format(count_none, sdf_path, len(data_list)) ) return pd.DataFrame(data_list) def file_opener(filename, mode="r"): """File reader stream""" filename = str(filename) if "w" in mode: filename = "simplecache::" + filename if filename.endswith(".gz"): instream = fsspec.open(filename, mode=mode, compression="gzip") else: instream = fsspec.open(filename, mode=mode) return instream
5,681
get firebase credential errors
import json
from json import JSONDecodeError

from firebase_admin import (delete_app as delete_firebase_instance, get_app as get_firebase_app,
    initialize_app as initialize_firebase_app)
from firebase_admin.credentials import Certificate as FirebaseCertificate

from constants.celery_constants import (ANDROID_FIREBASE_CREDENTIALS, BACKEND_FIREBASE_CREDENTIALS,
    FIREBASE_APP_TEST_NAME, IOS_FIREBASE_CREDENTIALS)
from database.system_models import FileAsText


# Global kill switch: forces safely_get_db_credential to return None so every
# push-notification enablement check fails.
MANUALLY_DISABLE_FIREBASE = False


class FirebaseMisconfigured(Exception): pass

#
# Firebase app object instantiation and credential tests
# (This code can probably be simplified with a threading.Lock object.)
#

def safely_get_db_credential(credential_type: str) -> str or None:
    """ If this function returns None then checks for push notification enablement will all fail.
    Set MANUALLY_DISABLE_FIREBASE to True to force-disable push notifications. """
    if MANUALLY_DISABLE_FIREBASE:
        return None

    # Look the credential blob up by its tag in the FileAsText table.
    credentials = FileAsText.objects.filter(tag=credential_type).first()
    if credentials:
        return credentials.text
    else:
        return None


def METHOD_NAME(credentials: str):
    """ Wrapper to get error strings for test_firebase_credential_errors because otherwise the
    code is gross.  Returns None if no errors occurred. """
    try:
        test_firebase_credential_errors(credentials)
        return None
    except Exception as e:
        # The stringified exception doubles as the user-facing error message.
        return str(e)


def test_firebase_credential_errors(credentials: str) -> None:
    """ Tests credentials by creating a temporary otherwise unused credential. """
    try:
        encoded_credentials = json.loads(credentials)
    except JSONDecodeError:
        # need clean error message
        raise Exception("The credentials provided are not valid JSON.")

    # both of these raise ValueErrors, delete only fails if cert and app objects pass.
    cert = FirebaseCertificate(encoded_credentials)
    app = initialize_firebase_app(cert, name=FIREBASE_APP_TEST_NAME)
    delete_firebase_instance(app)


def check_firebase_instance(require_android=False, require_ios=False) -> bool:
    """ Test the database state for the various creds. If creds are present determine whether
    the firebase app is already instantiated, if not call update_firebase_instance. """
    active_creds = list(FileAsText.objects.filter(
        tag__in=[BACKEND_FIREBASE_CREDENTIALS, ANDROID_FIREBASE_CREDENTIALS,
                 IOS_FIREBASE_CREDENTIALS]
    ).values_list("tag", flat=True))

    # Missing backend creds — or missing the platform-specific creds the
    # caller requires — means push notifications are not enabled.
    if (  # keep those parens.
        BACKEND_FIREBASE_CREDENTIALS not in active_creds
        or (require_android and ANDROID_FIREBASE_CREDENTIALS not in active_creds)
        or (require_ios and IOS_FIREBASE_CREDENTIALS not in active_creds)
    ):
        return False

    # METHOD_NAME returns an error string (truthy) when validation fails.
    if METHOD_NAME(safely_get_db_credential(BACKEND_FIREBASE_CREDENTIALS)):
        return False

    # avoid calling update so we never delete and then recreate the app (we get thrashed
    # during push notification send from calling this, its not thread-safe), overhead is low.
    try:
        get_firebase_app()
    except ValueError:
        # we don't care about extra work inside calling update_firebase_instance, it shouldn't be
        # hit too heavily.
        update_firebase_instance()
    return True


def update_firebase_instance(recur_depth=3) -> None:
    """ Creates or destroys the firebase app, handling basic credential errors. """
    junk_creds = False
    encoded_credentials = None  # IDE complains
    try:
        # json.loads(None) raises TypeError when no credential row exists.
        encoded_credentials = json.loads(safely_get_db_credential(BACKEND_FIREBASE_CREDENTIALS))
    except (JSONDecodeError, TypeError):
        junk_creds = True

    # Always tear down any existing default app before (possibly) recreating it.
    try:
        delete_firebase_instance(get_firebase_app())
    except ValueError:
        # occurs when get_firebase_app() fails, delete_firebase_instance is only called if it succeeds.
        pass

    # With junk creds the correct end state is "no app"; we are done.
    if junk_creds:
        return

    # can now ~safely initialize the firebase app, re-casting any errors for runime scenarios
    # errors at this point should only occur if the app has somehow gotten broken credentials.
    try:
        cert = FirebaseCertificate(encoded_credentials)
    except ValueError as e:
        raise FirebaseMisconfigured(str(e))

    try:
        initialize_firebase_app(cert)
    except ValueError as e:
        # occasionally we do hit a race condition, handle that with 3 tries, comment in error message.
        # NOTE(review): recur_depth=3 with `>= 0` allows 4 retries before
        # raising, slightly more than the comment says — confirm intended.
        if recur_depth >= 0:
            return update_firebase_instance(recur_depth - 1)
        raise FirebaseMisconfigured(
            "This error is usually caused by a race condition, please report it if this happens frequently: "
            + str(e)
        )
5,682
network to pergeos
import logging
import numpy as np
from openpnm.io import _parse_filename
from openpnm.network import Network


logger = logging.getLogger(__name__)


def METHOD_NAME(network, filename=''):
    r"""
    Writes an OpenPNM network to a PerGeos/Avizo ASCII ``.am`` file.

    Parameters
    ----------
    network : Network
        The network to export. If ``pore.EqRadius`` is missing it is
        derived from ``pore.diameter`` (or filled with ones as a fallback).
    filename : str
        Name of the output file. Defaults to ``network.name``; the ``.am``
        extension is enforced by ``_parse_filename``.
    """
    # Ensure network has PerGeos' expected properties
    if 'pore.EqRadius' not in network.props():
        try:
            network['pore.EqRadius'] = network['pore.diameter']/2
        except KeyError:
            network['pore.EqRadius'] = np.ones([network.Np, ])

    # --- File header -----------------------------------------------------
    s = ["# Avizo 3D ASCII 3.0\n\n"]
    s.append("define VERTEX " + str(network.Np) + '\n')
    s.append("define EDGE " + str(network.Nt) + '\n')
    s.append("define POINT " + str(2*network.Nt) + '\n\n')
    s.append("Parameters {\n\tContentType \"HxPoreNetworkModel\"\n}\n\n")

    # --- Property declarations: one '@<n>' label per exported array ------
    types = {'b': 'int', 'i': 'int', 'f': 'float'}
    typemap = {}
    namemap = {}
    shapemap = {}
    propmap = {}
    i = 1
    NumEdgePoints = 1
    for item in network.keys():
        typemap[item] = types[str(network[item].dtype)[0]]
        ncols = int(network[item].size/network[item].shape[0])
        if ncols > 1:
            shapemap[item] = '[' + str(ncols) + ']'
        else:
            shapemap[item] = ''
        if item.startswith('pore'):
            element = 'pore', 'VERTEX'
        if item.startswith('throat'):
            element = 'throat', 'EDGE'
        # CamelCase the property name, e.g. 'pore.eq_radius' -> 'EqRadius'.
        # (Comprehension variable renamed from 'i' to 'w' so it no longer
        # shadows the '@' label counter above.)
        n = item.replace(element[0] + '.', '').replace('.', '_').split('_')
        n = ''.join([w[0].upper() + w[1:] for w in n if len(w)])
        namemap[item] = n
        temp = element[1] + " { " + typemap[item] + shapemap[item] + " " \
            + namemap[item] + " } @" + str(i) + '\n'
        if temp.find('EdgeConnectivity') == -1:
            # replaces openpnm tags with the mandatory am file's tags
            if "Conns" in temp:
                temp = temp.replace("Conns", "EdgeConnectivity")
            elif "Coords" in temp:
                temp = temp.replace("Coords", "VertexCoordinates")
            s.append(temp)
            propmap[item] = str(i)
            if "NumEdgePoints" in temp:
                NumEdgePoints = 0
            i += 1
    if NumEdgePoints:
        # The network carries no NumEdgePoints property: declare one and
        # remember its '@' label so constant data (2 points per edge) can be
        # appended at the end of the file.
        temp = "EDGE { int NumEdgePoints" + " } @" + str(i) + '\n'
        s.append(temp)
        tempat = "@" + str(i) + '\n'
        i += 1

    # Declare the POINT data (straight edges: 2 end points per throat)
    s.append("POINT { float[3] EdgePointCoordinates } @" + str(i))
    s.append("\n\n# Data section follows")

    # --- Data sections ---------------------------------------------------
    # NOTE(review): an item other than 'throat.EdgeConnectivity' whose mapped
    # name contains 'EdgeConnectivity' would be missing from propmap here and
    # raise KeyError — confirm such names cannot occur upstream.
    for item in network.keys():
        data = network[item]
        if item != 'throat.EdgeConnectivity':
            s.append('\n\n@' + propmap[item] + '\n')
            if shapemap[item] == '':
                data = np.atleast_2d(data).T
            if typemap[item] == 'float':
                formatter = {'float_kind': lambda x: "%.15E" % x}
            else:
                formatter = None
            if data.dtype == 'bool':
                data = data.astype(int)
            # threshold=np.inf stops numpy from eliding values with '...';
            # passed per-call instead of mutating the process-wide print
            # options (the old np.set_printoptions was never restored).
            d = np.array2string(data, formatter=formatter, threshold=np.inf)
            s.append(d.replace('[', '').replace(']', '').replace('\n ', '\n'))

    # Add POINT data
    s.append('\n\n@' + str(i) + '\n')
    formatter = {'float_kind': lambda x: "%.15E" % x}
    conns = network['throat.conns']
    d = np.array2string(network['pore.coords'][conns], formatter=formatter,
                        threshold=np.inf)
    # Strip brackets and leading spaces numpy inserts after line breaks.
    # The one-space replacement is applied twice on purpose: each pass
    # removes a single space following a newline.
    for r in (('[', ''), (']', ''), ('\n\n', '\n'), ('\n ', '\n'),
              ('\n ', '\n')):
        d = d.replace(*r)
    d += '\n'
    s.append(d)

    # Add NumEdgePoints
    if NumEdgePoints:
        s.append('\n\n' + tempat)
        s.append(''.join(['2' + '\n']*network.Nt))

    # Write to file
    if filename == '':
        filename = network.name
    fname = _parse_filename(filename=filename, ext='am')
    with open(fname, 'w') as f:
        f.write(''.join(s))


def network_from_pergeos(filename):
    r"""
    Loads a network from a PerGeos file.

    Notes
    -----
    PerGeos is the format used by the Avizo software. See `here for more
    details <https://cases.pergeos.com/>`_.
    """
    net = {}
    # ---------------------------------------------------------------------
    # Parse the link1 file
    filename = _parse_filename(filename=filename, ext='am')
    with open(filename, mode='r') as f:
        # Scan the header until both element counts are known
        Np = None
        Nt = None
        while (Np is None) or (Nt is None):
            s = f.readline()[:-1].split(' ')
            if s[0] == 'define':
                if s[1] == 'VERTEX':
                    Np = int(s[2])
                if s[1] == 'EDGE':
                    Nt = int(s[2])

        # Read property declarations; stop at the '#' comment line that
        # introduces the data section. Each declaration maps an '@<n>'
        # label to a zero-initialized array of the declared shape/dtype.
        net = {}
        propmap = {}
        typemap = {}
        shapemap = {}
        while True:
            s = f.readline()[:-1].split(' ')
            if s[0] == 'VERTEX':
                dshape = [Np]
                if s[2].endswith(']'):
                    ncols = int(s[2].split('[', 1)[1].split(']')[0])
                    dshape.append(ncols)
                dtype = s[2].split('[')[0]
                temp = np.zeros(dshape, dtype=dtype)
                net['pore.'+s[3]] = temp
                key = int(s[-1].replace('@', ''))
                propmap[key] = 'pore.'+s[3]
                typemap[key] = dtype
                shapemap[key] = dshape
            elif s[0] == 'EDGE':
                dshape = [Nt]
                if s[2].endswith(']'):
                    ncols = int(s[2].split('[', 1)[1].split(']')[0])
                    dshape.append(ncols)
                dtype = s[2].split('[')[0]
                temp = np.zeros(dshape, dtype=dtype)
                net['throat.'+s[3]] = temp
                key = int(s[-1].replace('@', ''))
                propmap[key] = 'throat.'+s[3]
                typemap[key] = dtype
                shapemap[key] = dshape
            elif s[0] == '#':
                break

        # The rest of the file holds the data, split into '@<n>' sections
        # whose first line is the section number. BUGFIX: the previous code
        # tested ``key in s`` where ``key`` is an int and ``s`` a list of
        # strings — always False on Python 3 — so no property data was ever
        # loaded and every array stayed at its zero initialization.
        sections = {}
        for part in f.read().split('@')[1:]:
            label, _sep, body = part.partition('\n')
            sections[int(label)] = body
        for key in propmap:
            if key in sections:
                data = ' '.join(sections[key].split('\n'))
                arr = np.fromstring(data, dtype=typemap[key], sep=' ')
                arr = np.reshape(arr, newshape=shapemap[key])
                net[propmap[key]] = arr
    # End file parsing
    net['pore.coords'] = net['pore.VertexCoordinates']
    net['throat.conns'] = np.sort(net['throat.EdgeConnectivity'], axis=1)
    network = Network()
    network.update(net)
    return network
5,683
shapes
"""$ rio shapes""" from __future__ import division import logging import click import cligj import rasterio from rasterio.rio import options from rasterio.features import dataset_features from rasterio.rio.helpers import write_features logger = logging.getLogger(__name__) @click.command(short_help="Write shapes extracted from bands or masks.") @options.file_in_arg @options.output_opt @cligj.precision_opt @cligj.indent_opt @cligj.compact_opt @cligj.projection_geographic_opt @cligj.projection_projected_opt @options.sequence_opt @cligj.use_rs_opt @cligj.geojson_type_feature_opt(True) @cligj.geojson_type_bbox_opt(False) @click.option('--band/--mask', default=True, help="Choose to extract from a band (the default) or a mask.") @click.option('--bidx', 'bandidx', type=int, default=None, help="Index of the band or mask that is the source of shapes.") @click.option('--sampling', type=int, default=1, help="Inverse of the sampling fraction; " "a value of 10 decimates.") @click.option('--with-nodata/--without-nodata', default=False, help="Include or do not include (the default) nodata regions.") @click.option('--as-mask/--not-as-mask', default=False, help="Interpret a band as a mask and output only one class of " "valid data shapes.") @click.pass_context def METHOD_NAME( ctx, input, output, precision, indent, compact, projection, sequence, use_rs, geojson_type, band, bandidx, sampling, with_nodata, as_mask): """Extracts shapes from one band or mask of a dataset and writes them out as GeoJSON. Unless otherwise specified, the shapes will be transformed to WGS 84 coordinates. The default action of this command is to extract shapes from the first band of the input dataset. The shapes are polygons bounding contiguous regions (or features) of the same raster value. This command performs poorly for int16 or float type datasets. 
Bands other than the first can be specified using the `--bidx` option: $ rio shapes --bidx 3 tests/data/RGB.byte.tif The valid data footprint of a dataset's i-th band can be extracted by using the `--mask` and `--bidx` options: $ rio shapes --mask --bidx 1 tests/data/RGB.byte.tif Omitting the `--bidx` option results in a footprint extracted from the conjunction of all band masks. This is generally smaller than any individual band's footprint. A dataset band may be analyzed as though it were a binary mask with the `--as-mask` option: $ rio shapes --as-mask --bidx 1 tests/data/RGB.byte.tif """ # These import numpy, which we don't want to do unless it's needed. dump_kwds = {'sort_keys': True} if indent: dump_kwds['indent'] = indent if compact: dump_kwds['separators'] = (',', ':') stdout = click.open_file( output, 'w') if output else click.get_text_stream('stdout') bidx = 1 if bandidx is None and band else bandidx if not sequence: geojson_type = 'collection' geographic = True if projection == 'geographic' else False with ctx.obj["env"] as env: with rasterio.open(input) as src: write_features( stdout, feature_gen( src, env, bidx, sampling=sampling, band=band, as_mask=as_mask, with_nodata=with_nodata, geographic=geographic, precision=precision, ), sequence=sequence, geojson_type=geojson_type, use_rs=use_rs, **dump_kwds ) def feature_gen(src, env, *args, **kwargs): class Collection: def __init__(self, env): self.bboxes = [] self.env = env @property def bbox(self): minxs, minys, maxxs, maxys = zip(*self.bboxes) return min(minxs), min(minys), max(maxxs), max(maxys) def __call__(self): for f in dataset_features(src, *args, **kwargs): self.bboxes.append(f['bbox']) yield f return Collection(env)
5,684
watch
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for forward-mode automatic differentiation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import backprop from tensorflow.python.eager import def_function from tensorflow.python.eager import execute from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest # TODO(allenl): experimental_relax_shapes for gradients which rely on static # shape information may be underspecialized. We may want hand-written forward # implementations. @def_function.function(experimental_relax_shapes=True) def _forward_gradient(op_name, attr_tuple, inputs, outputs, tangents): """Computes a Jacobian-vector product for an op. Note that this function would be wasteful if executed eagerly. It runs the backward gradient function and throws away the result just to record its operations on a GradientTape. These unused ops are pruned away when this function is traced. Args: op_name: A string, the type of operation being executed. 
attr_tuple: Attributes of the operation. inputs: A flat list of input Tensors to the operation. outputs: A flat list of output Tensors from the operation. tangents: A flat list of Tensors, same shape as `inputs`. Returns: A flat list of tangents corresponding to `outputs`. """ float_inputs = [] float_indices = [] nontrivial_tangents = [] for input_index, tensor in enumerate(inputs): if tensor.dtype.is_floating: float_inputs.append(tensor) float_indices.append(input_index) nontrivial_tangents.append(tangents[input_index]) with backprop.GradientTape() as transpose_tape: with backprop.GradientTape() as backfunc_tape: backfunc_tape.METHOD_NAME(float_inputs) execute.record_gradient(op_name, inputs, attr_tuple, outputs, "forward_op_replay") forwardprop_aids = [] float_outputs = [] nontrivial_output_indices = [] for output_index, output in enumerate(outputs): if output.dtype.is_floating: forwardprop_aids.append( array_ops.ones_like(output, name="unused_forwardprop_aid")) float_outputs.append(output) nontrivial_output_indices.append(output_index) transpose_tape.METHOD_NAME(forwardprop_aids) grads = backfunc_tape.gradient( float_outputs, float_inputs, forwardprop_aids, unconnected_gradients=UnconnectedGradients.ZERO) nontrivial_output_tangents = transpose_tape.gradient( grads, forwardprop_aids, output_gradients=nontrivial_tangents) output_tangents = [None] * len(outputs) for index, tangent in zip(nontrivial_output_indices, nontrivial_output_tangents): output_tangents[index] = tangent return output_tangents pywrap_tensorflow.TFE_Py_RegisterForwardGradientFunction(_forward_gradient) class ForwardGradientAccumulator(object): """Computes Jacobian-vector products using forward-mode autodiff. 
Example: ``` with ForwardGradientAccumulator() as acc: x = tf.constant([[2.0, 3.0], [1.0, 4.0]]) acc.watch(x, tf.constant([[5., 6.], [7., 8.]])) y = tf.reduce_sum(tf.sin(x) * tf.tan(x), axis=1) jvp = acc.jvp(y) ``` Note that `ForwardGradientAccumulator`s are always applied in creation order, so inner accumulators may not see JVP computation from outer accumulators. Take higher-order gradients from outer accumulators: ``` primal = tf.constant(1.1) with ForwardGradientAccumulator() as outer_acc: outer_acc.watch(primal, tf.constant(1.)) with ForwardGradientAccumulator() as acc: acc.watch(primal, tf.constant(1.)) primal_out = primal ** tf.constant(3.5) inner_jvp = acc.jvp(primal_out) outer_jvp = outer_acc.jvp(inner_jvp) ``` Reversing the collection in the last two lines to instead retrieve `acc.jvp(outer_acc.jvp(primal_out))` will not work. """ def __init__(self): self._accumulator = None self._recording = False def __enter__(self): self._push_accumulator() return self def __exit__(self, typ, value, traceback): if self._recording: self._pop_accumulator() def _push_accumulator(self): if self._recording: raise ValueError("Accumulator is already recording.") if self._accumulator is None: self._accumulator = pywrap_tensorflow.TFE_Py_ForwardAccumulatorNew() else: # TODO(allenl): Allow reuse raise NotImplementedError("Accumulator reuse isn't implemented yet.") self._recording = True def _pop_accumulator(self): if not self._recording: raise ValueError("Tape is not recording.") pywrap_tensorflow.TFE_Py_ForwardAccumulatorSetRemove(self._accumulator) self._recording = False # TODO(allenl): Does this need to be public, or should the constructor instead # take all watched Tensors? Write a realistic usage example (e.g. Hessian-free # optimization) and decide. def METHOD_NAME(self, tensor, tangents): """Ensures that `tensor` is being traced by this tape. 
Mathematically, `tangents` is part of a vector right-multiplying the Jacobian matrix (a Jacobian-vector product) for the function computed while the tape is active. Since JVPs are computed in forward mode as the computation happens, this vector must be supplied before the computation takes place. Watching a single Tensor multiple times sums each `tangents`. An un-watched Tensor has zeros for its tangent vector. Args: tensor: A Tensor or list of Tensors. tangents: A Tensor or list of Tensors matching `tensor`. """ nest.assert_same_structure(tensor, tangents) for t, g in zip(nest.flatten(tensor), nest.flatten(tangents)): if not t.dtype.is_floating: logging.log_first_n( logging.WARN, "The dtype of the watched tensor must be " "floating (e.g. tf.float32), got %r", 5, t.dtype) if hasattr(t, "handle"): # TODO(allenl): Handle watching variables. raise NotImplementedError("Currently only Tensors may be watched.") g = ops.convert_to_tensor(g, dtype=t.dtype) pywrap_tensorflow.TFE_Py_ForwardAccumulatorWatch(self._accumulator, t, g) def jvp(self, target): """Fetches the Jacobian-vector product computed for `target`. Note that this function performs no computation, and simply looks up a JVP that was already computed (unlike backprop using a `tf.GradientTape`, where the computation happens on the call to `tape.gradient`). Args: target: A watched Tensor or structure of Tensors to fetch the JVPs for. Returns: Tensors with the same shapes and dtypes as `target`, or None if no JVP is available. """ if self._accumulator is None: raise ValueError("Called jvp() without first tracing anything.") return nest.map_structure( functools.partial(pywrap_tensorflow.TFE_Py_ForwardAccumulatorJVP, self._accumulator), target)
5,685
estimate performance
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A python interface for Grappler clusters.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib from tensorflow.core.framework import step_stats_pb2 from tensorflow.core.grappler.costs import op_performance_data_pb2 from tensorflow.core.protobuf import device_properties_pb2 from tensorflow.python import pywrap_tensorflow as tf_cluster class Cluster(object): """Grappler Clusters.""" def __init__(self, allow_soft_placement=True, disable_detailed_stats=True, disable_timeline=True, devices=None): """Creates a Cluster. Args: allow_soft_placement: If True, TF will automatically fix illegal placements instead of erroring out if the placement isn't legal. disable_detailed_stats: If True, detailed statistics will not be available. disable_timeline: If True, the timeline information will not be reported. devices: A list of devices of type device_properties_pb2.NamedDevice. If None, a device list will be created based on the spec of the local machine. 
""" self._tf_cluster = None self._generate_timeline = not disable_timeline if devices is None: self._tf_cluster = tf_cluster.TF_NewCluster(allow_soft_placement, disable_detailed_stats) else: devices_serialized = [device.SerializeToString() for device in devices] self._tf_cluster = tf_cluster.TF_NewVirtualCluster(devices_serialized) def Shutdown(self): if self._tf_cluster is not None: tf_cluster.TF_ShutdownCluster(self._tf_cluster) self._tf_cluster = None def __del__(self): self.Shutdown() @property def tf_cluster(self): return self._tf_cluster def ListDevices(self): """Returns a list of available hardware devices.""" if self._tf_cluster is None: return [] return [device_properties_pb2.NamedDevice.FromString(device) for device in tf_cluster.TF_ListDevices(self._tf_cluster)] def ListAvailableOps(self): """Returns a list of all available operations (sorted alphabetically).""" return tf_cluster.TF_ListAvailableOps() def GetSupportedDevices(self, item): return tf_cluster.TF_GetSupportedDevices(self._tf_cluster, item.tf_item) def METHOD_NAME(self, device): return tf_cluster.TF_EstimatePerformance(device.SerializeToString()) def MeasureCosts(self, item): """Returns the cost of running the specified item. Args: item: The item for which to measure the costs. Returns: The triplet op_perfs, runtime, step_stats. """ ret_from_swig = tf_cluster.TF_MeasureCosts(item.tf_item, self._tf_cluster, self._generate_timeline) if ret_from_swig is None: return None op_perf_bytes_list, run_time, step_stats_bytes = ret_from_swig op_perfs = [op_performance_data_pb2.OpPerformance.FromString(op_perf_bytes) for op_perf_bytes in op_perf_bytes_list] return (op_perfs, run_time, step_stats_pb2.StepStats.FromString(step_stats_bytes)) def DeterminePeakMemoryUsage(self, item): """Returns a snapshot of the peak memory usage. Args: item: The item for which to measure the costs. Returns: A hashtable indexed by device name. 
""" return tf_cluster.TF_DeterminePeakMemoryUsage(item.tf_item, self._tf_cluster) @contextlib.contextmanager def Provision(allow_soft_placement=True, disable_detailed_stats=True, disable_timeline=True, devices=None): cluster = Cluster(allow_soft_placement, disable_detailed_stats, disable_timeline, devices) yield cluster cluster.Shutdown()
5,686
validate after
from typing import Any, ClassVar, Generic, List, Optional, TypeVar, Union from typing_extensions import Self from pydantic import BaseModel, ConfigDict, Field, create_model, field_validator, model_validator, validator from pydantic.dataclasses import dataclass class Model(BaseModel): x: float y: str model_config = ConfigDict(from_attributes=True) class SelfReferencingModel(BaseModel): submodel: Optional['SelfReferencingModel'] @property def prop(self) -> None: ... SelfReferencingModel.model_rebuild() model = Model(x=1, y='y') Model(x=1, y='y', z='z') model.x = 2 model.model_validate(model) self_referencing_model = SelfReferencingModel(submodel=SelfReferencingModel(submodel=None)) class KwargsModel(BaseModel, from_attributes=True): x: float y: str kwargs_model = KwargsModel(x=1, y='y') KwargsModel(x=1, y='y', z='z') kwargs_model.x = 2 kwargs_model.model_validate(kwargs_model.__dict__) class InheritingModel(Model): z: int = 1 InheritingModel.model_validate(model.__dict__) class ForwardReferencingModel(Model): future: 'FutureModel' class FutureModel(Model): pass ForwardReferencingModel.model_rebuild() future_model = FutureModel(x=1, y='a') forward_model = ForwardReferencingModel(x=1, y='a', future=future_model) class NoMutationModel(BaseModel): x: int model_config = ConfigDict(frozen=True) class MutationModel(NoMutationModel): a: int = 1 model_config = ConfigDict(frozen=False, from_attributes=True) MutationModel(x=1).x = 2 MutationModel.model_validate(model.__dict__) class KwargsNoMutationModel(BaseModel, frozen=True): x: int class KwargsMutationModel(KwargsNoMutationModel, frozen=False, from_attributes=True): a: int = 1 KwargsMutationModel(x=1).x = 2 KwargsMutationModel.model_validate(model.__dict__) class OverrideModel(Model): x: int OverrideModel(x=1, y='b') class Mixin: def f(self) -> None: pass class MultiInheritanceModel(BaseModel, Mixin): pass MultiInheritanceModel().f() class AliasModel(BaseModel): x: str = Field(..., alias='y') alias_model = 
AliasModel(y='hello') assert alias_model.x == 'hello' class ClassVarModel(BaseModel): x: int y: ClassVar[int] = 1 ClassVarModel(x=1) @dataclass(config={'validate_assignment': True}) class AddProject: name: str slug: Optional[str] description: Optional[str] p = AddProject(name='x', slug='y', description='z') class TypeAliasAsAttribute(BaseModel): __type_alias_attribute__ = Union[str, bytes] class NestedModel(BaseModel): class Model(BaseModel): id: str model: Model _ = NestedModel.Model DynamicModel = create_model('DynamicModel', __base__=Model) dynamic_model = DynamicModel(x=1, y='y') dynamic_model.x = 2 class FrozenModel(BaseModel): x: int model_config = ConfigDict(frozen=True) class NotFrozenModel(FrozenModel): a: int = 1 model_config = ConfigDict(frozen=False, from_attributes=True) NotFrozenModel(x=1).x = 2 NotFrozenModel.model_validate(model.__dict__) class KwargsFrozenModel(BaseModel, frozen=True): x: int class KwargsNotFrozenModel(FrozenModel, frozen=False, from_attributes=True): a: int = 1 KwargsNotFrozenModel(x=1).x = 2 KwargsNotFrozenModel.model_validate(model.__dict__) class ModelWithSelfField(BaseModel): self: str def f(name: str) -> str: return name class ModelWithAllowReuseValidator(BaseModel): name: str normalize_name = field_validator('name')(f) model_with_allow_reuse_validator = ModelWithAllowReuseValidator(name='xyz') T = TypeVar('T') class Response(BaseModel, Generic[T]): data: T error: Optional[str] response = Response[Model](data=model, error=None) class ModelWithAnnotatedValidator(BaseModel): name: str @field_validator('name') def noop_validator_with_annotations(cls, name: str) -> str: return name def _default_factory_str() -> str: return 'x' def _default_factory_list() -> List[int]: return [1, 2, 3] class FieldDefaultTestingModel(BaseModel): # Required a: int b: int = Field() c: int = Field(...) 
# Default d: int = Field(1) # Default factory g: List[int] = Field(default_factory=_default_factory_list) h: str = Field(default_factory=_default_factory_str) i: str = Field(default_factory=lambda: 'test') _TModel = TypeVar('_TModel') _TType = TypeVar('_TType') class OrmMixin(Generic[_TModel, _TType]): @classmethod def from_orm(cls, model: _TModel) -> _TType: raise NotImplementedError @classmethod def from_orm_optional(cls, model: Optional[_TModel]) -> Optional[_TType]: if model is None: return None return cls.from_orm(model) import sys # noqa E402 if sys.version_info >= (3, 8): from dataclasses import InitVar # E402 InitVarStr = InitVar[str] else: # InitVar is not supported in 3.7 due to loss of type information InitVarStr = str @dataclass class MyDataClass: foo: InitVarStr bar: str MyDataClass(foo='foo', bar='bar') def get_my_custom_validator(field_name: str) -> Any: @validator(field_name, allow_reuse=True) def my_custom_validator(cls: Any, v: int) -> int: return v return my_custom_validator def foo() -> None: class MyModel(BaseModel): number: int custom_validator = get_my_custom_validator('number') # type: ignore[pydantic-field] @model_validator(mode='before') @classmethod def validate_before(cls, values: Any) -> Any: return values @model_validator(mode='after') def METHOD_NAME(self) -> Self: return self MyModel(number=2)
5,687
test roundtrip merged
import filecmp import tempfile from os.path import exists import gemmi import pytest from pandas.testing import assert_frame_equal import reciprocalspaceship as rs from reciprocalspaceship.utils import in_asu def test_read_merged(IOtest_mtz): """Test rs.read_mtz() with merged MTZ file""" dataset = rs.read_mtz(IOtest_mtz) assert dataset.spacegroup.number == 96 assert dataset.columns.to_list() == ["FMODEL", "PHIFMODEL"] assert dataset.index.names == ["H", "K", "L"] assert isinstance(dataset, rs.DataSet) def test_write_merged(IOtest_mtz): """Test DataSet.write_mtz() with merged MTZ file""" dataset = rs.read_mtz(IOtest_mtz) with tempfile.NamedTemporaryFile(suffix=".mtz") as temp: dataset.write_mtz(temp.name) assert exists(temp.name) def test_write_merged_nosg(IOtest_mtz): """Test that DataSet.write_mtz() without spacegroup raises AttributeError""" dataset = rs.read_mtz(IOtest_mtz) dataset.spacegroup = None with tempfile.NamedTemporaryFile(suffix=".mtz") as temp: with pytest.raises(AttributeError): dataset.write_mtz(temp.name) def test_write_merged_nocell(IOtest_mtz): """Test that DataSet.write_mtz() without cell raises AttributeError""" dataset = rs.read_mtz(IOtest_mtz) dataset.cell = None with tempfile.NamedTemporaryFile(suffix=".mtz") as temp: with pytest.raises(AttributeError): dataset.write_mtz(temp.name) @pytest.mark.parametrize("skip_problem_mtztypes", [True, False]) def test_write_merged_nonMTZDtype(IOtest_mtz, skip_problem_mtztypes): """ Test skip_problem_mtztypes flag of DataSet.write_mtz() """ dataset = rs.read_mtz(IOtest_mtz) dataset["nonMTZ"] = 1 with tempfile.NamedTemporaryFile(suffix=".mtz") as temp: if not skip_problem_mtztypes: with pytest.raises(ValueError): dataset.write_mtz( temp.name, skip_problem_mtztypes=skip_problem_mtztypes ) else: dataset.write_mtz(temp.name, skip_problem_mtztypes=skip_problem_mtztypes) assert exists(temp.name) def METHOD_NAME(IOtest_mtz): """Test roundtrip of rs.read_mtz() and DataSet.write_mtz() with merged MTZ file""" 
expected = rs.read_mtz(IOtest_mtz) temp1 = tempfile.NamedTemporaryFile(suffix=".mtz") temp2 = tempfile.NamedTemporaryFile(suffix=".mtz") expected.write_mtz(temp1.name) result = rs.read_mtz(temp1.name) result.write_mtz(temp2.name) assert_frame_equal(result, expected) assert filecmp.cmp(temp1.name, temp2.name) # Clean up temp1.close() temp2.close() def test_read_unmerged(data_unmerged): """Test rs.read_mtz() with unmerged data""" # Unmerged data will not be in asu, and should have a PARTIAL column assert not in_asu(data_unmerged.get_hkls(), data_unmerged.spacegroup).all() assert "PARTIAL" in data_unmerged.columns assert data_unmerged["PARTIAL"].dtype.name == "bool" assert not "M/ISYM" in data_unmerged.columns assert not data_unmerged.merged def test_read_unmerged_2m_isym(data_unmerged): """Test rs.read_mtz() with unmerged data containing 2 M/ISYM columns""" data_unmerged["EXTRA"] = 1 data_unmerged["EXTRA"] = data_unmerged["EXTRA"].astype("M/ISYM") temp = tempfile.NamedTemporaryFile(suffix=".mtz") data_unmerged.write_mtz(temp.name) with pytest.raises(ValueError): fails = rs.read_mtz(temp.name) # Clean up temp.close() @pytest.mark.parametrize("label_centrics", [True, False]) def test_roundtrip_unmerged(data_unmerged, label_centrics): """ Test roundtrip of rs.read_mtz() and DataSet.write_mtz() with unmerged data """ if label_centrics: data_unmerged.label_centrics(inplace=True) temp = tempfile.NamedTemporaryFile(suffix=".mtz") temp2 = tempfile.NamedTemporaryFile(suffix=".mtz") data_unmerged.write_mtz(temp.name) data2 = rs.read_mtz(temp.name) data2 = data2[data_unmerged.columns] # Ensure consistent column ordering data2.write_mtz(temp2.name) assert filecmp.cmp(temp.name, temp2.name) assert_frame_equal(data_unmerged, data2) assert data_unmerged.merged == data2.merged # Clean up temp.close() temp2.close() @pytest.mark.parametrize("in_asu", [True, False]) def test_unmerged_after_write(data_unmerged, in_asu): """ #110: Test that unmerged DataSet objects are unchanged 
following calls to DataSet.write_mtz() """ if in_asu: data_unmerged.hkl_to_asu(inplace=True) expected = data_unmerged.copy() data_unmerged.write_mtz("/dev/null") assert_frame_equal(data_unmerged, expected) @pytest.mark.parametrize("project_name", [None, "project", "reciprocalspaceship", 1]) @pytest.mark.parametrize("crystal_name", [None, "crystal", "reciprocalspaceship", 1]) @pytest.mark.parametrize("dataset_name", [None, "dataset", "reciprocalspaceship", 1]) def test_to_gemmi_names(IOtest_mtz, project_name, crystal_name, dataset_name): """ Test that DataSet.to_gemmi() sets project/crystal/dataset names when given. ValueError should be raised for anything other than a string. """ ds = rs.read_mtz(IOtest_mtz) if ( not isinstance(project_name, str) or not isinstance(crystal_name, str) or not isinstance(dataset_name, str) ): with pytest.raises(ValueError): ds.to_gemmi( project_name=project_name, crystal_name=crystal_name, dataset_name=dataset_name, ) return gemmimtz = ds.to_gemmi( project_name=project_name, crystal_name=crystal_name, dataset_name=dataset_name, ) assert gemmimtz.dataset(0).project_name == project_name assert gemmimtz.dataset(0).crystal_name == crystal_name assert gemmimtz.dataset(0).dataset_name == dataset_name @pytest.mark.parametrize("project_name", [None, "project", "reciprocalspaceship", 1]) @pytest.mark.parametrize("crystal_name", [None, "crystal", "reciprocalspaceship", 1]) @pytest.mark.parametrize("dataset_name", [None, "dataset", "reciprocalspaceship", 1]) def test_write_mtz_names(IOtest_mtz, project_name, crystal_name, dataset_name): """ Test that DataSet.write_mtz() sets project/crystal/dataset names when given. ValueError should be raised for anything other than a string. 
""" ds = rs.read_mtz(IOtest_mtz) temp = tempfile.NamedTemporaryFile(suffix=".mtz") if ( not isinstance(project_name, str) or not isinstance(crystal_name, str) or not isinstance(dataset_name, str) ): with pytest.raises(ValueError): ds.write_mtz( temp.name, project_name=project_name, crystal_name=crystal_name, dataset_name=dataset_name, ) temp.close() return else: ds.write_mtz( temp.name, project_name=project_name, crystal_name=crystal_name, dataset_name=dataset_name, ) gemmimtz = gemmi.read_mtz_file(temp.name) assert gemmimtz.dataset(0).project_name == project_name assert gemmimtz.dataset(0).crystal_name == crystal_name assert gemmimtz.dataset(0).dataset_name == dataset_name # Clean up temp.close()
5,688
installation cancelled or errored
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)

"""
Status widget for Kite completions.
"""

# Standard library imports
import logging

# Third party imports
from qtpy.QtCore import Slot

# Local imports
from spyder.api.widgets.status import StatusBarWidget
from spyder.config.base import _, running_under_pytest
from spyder.plugins.completion.providers.kite.utils.status import (
    check_if_kite_installed, NOT_INSTALLED)
from spyder.plugins.completion.providers.kite.utils.install import (
    KiteInstallationThread)
from spyder.plugins.completion.providers.kite.widgets.install import (
    KiteInstallerDialog)
from spyder.utils.icon_manager import ima

logger = logging.getLogger(__name__)


class KiteStatusWidget(StatusBarWidget):
    """Status bar widget for Kite completions status."""
    BASE_TOOLTIP = _("Kite completions status")
    DEFAULT_STATUS = _('not reachable')
    ID = 'kite_status'

    def __init__(self, parent, provider):
        """
        Create the widget, wire installation signals, and hide it when
        Kite is not installed.
        """
        self.provider = provider
        self.tooltip = self.BASE_TOOLTIP
        self.installation_thread = KiteInstallationThread(None)

        super().__init__(parent)
        # BUGFIX(idiom): don't bind the unused path to ``_`` — that shadowed
        # the gettext alias imported above within this scope.
        is_installed, _install_path = check_if_kite_installed()
        self.setVisible(is_installed)

        # Installation dialog
        self.installer = KiteInstallerDialog(
            self,
            self.installation_thread)

        self.installation_thread.sig_installation_status.connect(
            self.set_value)
        self.sig_clicked.connect(self.show_installation_dialog)

    def set_value(self, value):
        """Update the widget to reflect the Kite completions state.

        ``value`` may be None (fall back to the default status), a plain
        status string, or a dict with 'short'/'long' entries emitted by the
        installation thread.
        """
        kite_enabled = self.provider.get_conf(('enabled_providers', 'kite'),
                                              default=True,
                                              section='completions')
        is_installing = self.is_installing()
        cancelled_or_errored = self.METHOD_NAME()

        if value is not None and 'short' in value:
            # Dict form from the installer: long text becomes the tooltip.
            self.tooltip = value['long']
            value = value['short']
        elif value is not None and (is_installing or cancelled_or_errored):
            self.setVisible(True)
            if value == NOT_INSTALLED:
                return
            elif is_installing:
                self.tooltip = _("Kite installation will continue in the "
                                 "background.\n"
                                 "Click here to show the installation "
                                 "dialog again")
            elif cancelled_or_errored:
                self.tooltip = _("Click here to show the\n"
                                 "installation dialog again")
        elif value is None:
            value = self.DEFAULT_STATUS
            self.tooltip = self.BASE_TOOLTIP

        self.update_tooltip()
        self.setVisible(value != NOT_INSTALLED and kite_enabled)
        value = "Kite: {0}".format(value)
        # Modernized to zero-argument super() for consistency with __init__.
        super().set_value(value)

    def get_tooltip(self):
        """Reimplementation to get a dynamic tooltip."""
        return self.tooltip

    def get_icon(self):
        """Return the Kite icon shown in the status bar."""
        return ima.icon('kite')

    @Slot()
    def show_installation_dialog(self):
        """Show installation dialog."""
        installed, _install_path = check_if_kite_installed()
        if not installed and not running_under_pytest():
            self.installer.show()

    def is_installing(self):
        """Check if an installation is taking place."""
        return (self.installation_thread.isRunning()
                and not self.installation_thread.cancelled)

    def METHOD_NAME(self):
        """Check if an installation was cancelled or failed."""
        return self.installation_thread.cancelled_or_errored()

    @Slot()
    def mainwindow_setup_finished(self):
        """
        This is called after the main window setup finishes, and the third
        time Spyder is started, to show Kite's installation dialog and
        onboarding if necessary.
        """
        spyder_runs = self.provider.get_conf('spyder_runs')
        installers_available = self.provider.get_conf('installers_available')
        if spyder_runs == 3 and installers_available:
            self.provider._kite_onboarding()

            show_dialog = self.provider.get_conf('show_installation_dialog')
            if show_dialog:
                # Only show the dialog once at startup
                self.provider.set_conf('show_installation_dialog', False)
                self.show_installation_dialog()
        else:
            if spyder_runs < 3:
                self.provider.set_conf('spyder_runs', spyder_runs + 1)
5,689
test mplex stream reset
"""Tests for mplex stream read/write/close/reset semantics."""
import pytest
import trio
from trio.testing import wait_all_tasks_blocked

from libp2p.stream_muxer.mplex.exceptions import (
    MplexStreamClosed,
    MplexStreamEOF,
    MplexStreamReset,
)
from libp2p.stream_muxer.mplex.mplex import MPLEX_MESSAGE_CHANNEL_SIZE
from libp2p.tools.constants import MAX_READ_LEN

DATA = b"data_123"


@pytest.mark.trio
async def test_mplex_stream_read_write(mplex_stream_pair):
    stream_0, stream_1 = mplex_stream_pair
    await stream_0.write(DATA)
    assert (await stream_1.read(MAX_READ_LEN)) == DATA


@pytest.mark.trio
async def test_mplex_stream_full_buffer(mplex_stream_pair):
    stream_0, stream_1 = mplex_stream_pair
    # Test: The message channel is of size `MPLEX_MESSAGE_CHANNEL_SIZE`.
    # It should be fine to read even there are already
    # `MPLEX_MESSAGE_CHANNEL_SIZE` messages arriving.
    for _ in range(MPLEX_MESSAGE_CHANNEL_SIZE):
        await stream_0.write(DATA)
    await wait_all_tasks_blocked()
    # Sanity check
    assert MAX_READ_LEN >= MPLEX_MESSAGE_CHANNEL_SIZE * len(DATA)
    assert (await stream_1.read(MAX_READ_LEN)) == MPLEX_MESSAGE_CHANNEL_SIZE * DATA

    # Test: Read after `MPLEX_MESSAGE_CHANNEL_SIZE + 1` messages has arrived,
    # which exceeds the channel size. The stream should have been reset.
    for _ in range(MPLEX_MESSAGE_CHANNEL_SIZE + 1):
        await stream_0.write(DATA)
    await wait_all_tasks_blocked()
    with pytest.raises(MplexStreamReset):
        await stream_1.read(MAX_READ_LEN)


@pytest.mark.trio
async def test_mplex_stream_pair_read_until_eof(mplex_stream_pair):
    read_bytes = bytearray()
    stream_0, stream_1 = mplex_stream_pair

    async def read_until_eof():
        read_bytes.extend(await stream_1.read())

    expected_data = bytearray()

    async with trio.open_nursery() as nursery:
        nursery.start_soon(read_until_eof)
        # Test: `read` doesn't return before `close` is called.
        await stream_0.write(DATA)
        expected_data.extend(DATA)
        await trio.sleep(0.01)
        assert len(read_bytes) == 0
        # Test: `read` doesn't return before `close` is called.
        await stream_0.write(DATA)
        expected_data.extend(DATA)
        await trio.sleep(0.01)
        assert len(read_bytes) == 0

        # Test: Close the stream, `read` returns, and receive previous sent data.
        await stream_0.close()

    assert read_bytes == expected_data


@pytest.mark.trio
async def test_mplex_stream_read_after_remote_closed(mplex_stream_pair):
    stream_0, stream_1 = mplex_stream_pair
    assert not stream_1.event_remote_closed.is_set()
    await stream_0.write(DATA)
    assert not stream_0.event_local_closed.is_set()
    await trio.sleep(0.01)
    await wait_all_tasks_blocked()
    await stream_0.close()
    assert stream_0.event_local_closed.is_set()
    await trio.sleep(0.01)
    await wait_all_tasks_blocked()
    assert stream_1.event_remote_closed.is_set()
    assert (await stream_1.read(MAX_READ_LEN)) == DATA
    with pytest.raises(MplexStreamEOF):
        await stream_1.read(MAX_READ_LEN)


@pytest.mark.trio
async def test_mplex_stream_read_after_local_reset(mplex_stream_pair):
    stream_0, stream_1 = mplex_stream_pair
    await stream_0.reset()
    with pytest.raises(MplexStreamReset):
        await stream_0.read(MAX_READ_LEN)


@pytest.mark.trio
async def test_mplex_stream_read_after_remote_reset(mplex_stream_pair):
    stream_0, stream_1 = mplex_stream_pair
    await stream_0.write(DATA)
    await stream_0.reset()
    # Sleep to let `stream_1` receive the message.
    await trio.sleep(0.1)
    await wait_all_tasks_blocked()
    with pytest.raises(MplexStreamReset):
        await stream_1.read(MAX_READ_LEN)


@pytest.mark.trio
async def test_mplex_stream_read_after_remote_closed_and_reset(mplex_stream_pair):
    stream_0, stream_1 = mplex_stream_pair
    await stream_0.write(DATA)
    await stream_0.close()
    await stream_0.reset()
    # Sleep to let `stream_1` receive the message.
    await trio.sleep(0.01)
    assert (await stream_1.read(MAX_READ_LEN)) == DATA


@pytest.mark.trio
async def test_mplex_stream_write_after_local_closed(mplex_stream_pair):
    stream_0, stream_1 = mplex_stream_pair
    await stream_0.write(DATA)
    await stream_0.close()
    with pytest.raises(MplexStreamClosed):
        await stream_0.write(DATA)


@pytest.mark.trio
async def test_mplex_stream_write_after_local_reset(mplex_stream_pair):
    stream_0, stream_1 = mplex_stream_pair
    await stream_0.reset()
    with pytest.raises(MplexStreamClosed):
        await stream_0.write(DATA)


@pytest.mark.trio
async def test_mplex_stream_write_after_remote_reset(mplex_stream_pair):
    stream_0, stream_1 = mplex_stream_pair
    await stream_1.reset()
    await trio.sleep(0.01)
    with pytest.raises(MplexStreamClosed):
        await stream_0.write(DATA)


@pytest.mark.trio
async def test_mplex_stream_both_close(mplex_stream_pair):
    stream_0, stream_1 = mplex_stream_pair
    # Flags are not set initially.
    assert not stream_0.event_local_closed.is_set()
    assert not stream_1.event_local_closed.is_set()
    assert not stream_0.event_remote_closed.is_set()
    assert not stream_1.event_remote_closed.is_set()
    # Streams are present in their `mplex_conn`.
    assert stream_0 in stream_0.muxed_conn.streams.values()
    assert stream_1 in stream_1.muxed_conn.streams.values()

    # Test: Close one side.
    await stream_0.close()
    await trio.sleep(0.01)

    assert stream_0.event_local_closed.is_set()
    assert not stream_1.event_local_closed.is_set()
    assert not stream_0.event_remote_closed.is_set()
    assert stream_1.event_remote_closed.is_set()
    # Streams are still present in their `mplex_conn`.
    assert stream_0 in stream_0.muxed_conn.streams.values()
    assert stream_1 in stream_1.muxed_conn.streams.values()

    # Test: Close the other side.
    await stream_1.close()
    await trio.sleep(0.01)
    # Both sides are closed.
    assert stream_0.event_local_closed.is_set()
    assert stream_1.event_local_closed.is_set()
    assert stream_0.event_remote_closed.is_set()
    assert stream_1.event_remote_closed.is_set()
    # Streams are removed from their `mplex_conn`.
    assert stream_0 not in stream_0.muxed_conn.streams.values()
    assert stream_1 not in stream_1.muxed_conn.streams.values()

    # Test: Reset after both close.
    await stream_0.reset()


@pytest.mark.trio
async def test_mplex_stream_reset(mplex_stream_pair):
    stream_0, stream_1 = mplex_stream_pair
    await stream_0.reset()
    await trio.sleep(0.01)

    # Both sides are closed.
    assert stream_0.event_local_closed.is_set()
    assert stream_1.event_local_closed.is_set()
    assert stream_0.event_remote_closed.is_set()
    assert stream_1.event_remote_closed.is_set()
    # Streams are removed from their `mplex_conn`.
    assert stream_0 not in stream_0.muxed_conn.streams.values()
    assert stream_1 not in stream_1.muxed_conn.streams.values()

    # `close` should do nothing.
    await stream_0.close()
    await stream_1.close()
    # `reset` should do nothing as well.
    await stream_0.reset()
    await stream_1.reset()
5,690
title
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities

__all__ = [
    'GetApiIssueAttachmentResult',
    'AwaitableGetApiIssueAttachmentResult',
    'get_api_issue_attachment',
    'get_api_issue_attachment_output',
]


@pulumi.output_type
class GetApiIssueAttachmentResult:
    """
    Issue Attachment Contract details.
    """
    def __init__(__self__, content=None, content_format=None, id=None, name=None, title=None, type=None):
        if content and not isinstance(content, str):
            raise TypeError("Expected argument 'content' to be a str")
        pulumi.set(__self__, "content", content)
        if content_format and not isinstance(content_format, str):
            raise TypeError("Expected argument 'content_format' to be a str")
        pulumi.set(__self__, "content_format", content_format)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if title and not isinstance(title, str):
            raise TypeError("Expected argument 'title' to be a str")
        pulumi.set(__self__, "title", title)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def content(self) -> str:
        """
        An HTTP link or Base64-encoded binary data.
        """
        return pulumi.get(self, "content")

    @property
    @pulumi.getter(name="contentFormat")
    def content_format(self) -> str:
        """
        Either 'link' if content is provided via an HTTP link or the MIME type of the Base64-encoded binary data provided in the 'content' property.
        """
        return pulumi.get(self, "content_format")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def title(self) -> str:
        """
        Filename by which the binary data will be saved.
        """
        return pulumi.get(self, "title")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")


class AwaitableGetApiIssueAttachmentResult(GetApiIssueAttachmentResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetApiIssueAttachmentResult(
            content=self.content,
            content_format=self.content_format,
            id=self.id,
            name=self.name,
            title=self.title,
            type=self.type)


def get_api_issue_attachment(api_id: Optional[str] = None,
                             attachment_id: Optional[str] = None,
                             issue_id: Optional[str] = None,
                             resource_group_name: Optional[str] = None,
                             service_name: Optional[str] = None,
                             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiIssueAttachmentResult:
    """
    Gets the details of the issue Attachment for an API specified by its identifier.


    :param str api_id: API identifier. Must be unique in the current API Management service instance.
    :param str attachment_id: Attachment identifier within an Issue. Must be unique in the current Issue.
    :param str issue_id: Issue identifier. Must be unique in the current API Management service instance.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str service_name: The name of the API Management service.
    """
    __args__ = dict()
    __args__['apiId'] = api_id
    __args__['attachmentId'] = attachment_id
    __args__['issueId'] = issue_id
    __args__['resourceGroupName'] = resource_group_name
    __args__['serviceName'] = service_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20220801:getApiIssueAttachment', __args__, opts=opts, typ=GetApiIssueAttachmentResult).value

    return AwaitableGetApiIssueAttachmentResult(
        content=pulumi.get(__ret__, 'content'),
        content_format=pulumi.get(__ret__, 'content_format'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        title=pulumi.get(__ret__, 'title'),
        type=pulumi.get(__ret__, 'type'))


@_utilities.lift_output_func(get_api_issue_attachment)
def get_api_issue_attachment_output(api_id: Optional[pulumi.Input[str]] = None,
                                    attachment_id: Optional[pulumi.Input[str]] = None,
                                    issue_id: Optional[pulumi.Input[str]] = None,
                                    resource_group_name: Optional[pulumi.Input[str]] = None,
                                    service_name: Optional[pulumi.Input[str]] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetApiIssueAttachmentResult]:
    """
    Gets the details of the issue Attachment for an API specified by its identifier.


    :param str api_id: API identifier. Must be unique in the current API Management service instance.
    :param str attachment_id: Attachment identifier within an Issue. Must be unique in the current Issue.
    :param str issue_id: Issue identifier. Must be unique in the current API Management service instance.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str service_name: The name of the API Management service.
    """
    ...
5,691
running
#!/usr/bin/env python3
'''Copied from m61-test'''

import linuxcnc
import os
from time import sleep
# this is how long we wait for linuxcnc to do our bidding


class LinuxcncError(Exception):
    """Raised when LinuxCNC is in a state that prevents the requested action."""
    pass
#    def __init__(self, value):
#        self.value = value
#    def __str__(self):
#        return repr(self.value)


class LinuxcncControl:
    '''
    issue G-Code commands
    make sure important modes are saved and restored
    mode is saved only once, and can be restored only once

    usage example:
        e = emc_control()
        e.prepare_for_mdi()
            any internal sub using e.g("G0.....")
        e.finish_mdi()
    '''

    def __init__(self, timeout=2):
        # Channels to the running LinuxCNC instance: command, error and status.
        self.c = linuxcnc.command()
        self.e = linuxcnc.error_channel()
        self.s = linuxcnc.stat()
        self.timeout = timeout

    def running(self, do_poll=True):
        '''
        check whether interpreter is running.
        If so, can't switch to MDI mode.
        '''
        if do_poll:
            self.s.poll()
        return (self.s.task_mode == linuxcnc.MODE_AUTO
                and self.s.interp_state != linuxcnc.INTERP_IDLE)

    def set_mode(self, m):
        '''
        set EMC mode if possible, else throw LinuxcncError
        return current mode
        '''
        self.s.poll()
        if self.s.task_mode == m:
            return m
        if self.running(do_poll=False):
            raise LinuxcncError("interpreter running - can not change mode")
        self.c.mode(m)
        self.c.wait_complete()
        return m

    def set_state(self, m):
        '''
        set EMC state if possible, else throw LinuxcncError
        return current state
        '''
        self.s.poll()
        if self.s.task_mode == m:
            return m
        self.c.state(m)
        self.c.wait_complete(self.timeout)
        return m

    def do_home(self, axismask):
        '''home the axes selected by axismask and wait for completion'''
        self.s.poll()
        self.c.home(axismask)
        self.c.wait_complete(self.timeout)

    def ok_for_mdi(self):
        '''
        check whether ok to run MDI commands.
        '''
        self.s.poll()
        return not self.s.estop and self.s.enabled and self.s.homed

    def prepare_for_mdi(self):
        '''
        check whether ok to run MDI commands. throw LinuxcncError if told so.
        return current mode
        '''
        self.s.poll()
        if self.s.estop:
            raise LinuxcncError("machine in ESTOP")
        if not self.s.enabled:
            raise LinuxcncError("machine not enabled")
        if not self.s.homed:
            raise LinuxcncError("machine not homed")
        if self.running():
            raise LinuxcncError("interpreter not idle")
        return self.set_mode(linuxcnc.MODE_MDI)

    # When True, NML/operator errors raise LinuxcncError instead of printing.
    g_raise_except = True

    def g(self, code, wait=False):
        '''
        issue G-Code as MDI command. wait for completion if requested
        '''
        self.c.mdi(code)
        if wait:
            try:
                while self.c.wait_complete(self.timeout) == -1:
                    pass
                return True
            except KeyboardInterrupt:
                print("interrupted by keyboard in c.wait_complete(self.timeout)")
                return False

        self.error = self.e.poll()
        if self.error:
            kind, text = self.error
            if kind in (linuxcnc.NML_ERROR, linuxcnc.OPERATOR_ERROR):
                if self.g_raise_except:
                    raise LinuxcncError(text)
                else:
                    print(("error " + text))
            else:
                print(("info " + text))
            return False

    def get_current_tool(self):
        '''return the tool number currently loaded in the spindle'''
        self.s.poll()
        return self.s.tool_in_spindle

    def active_codes(self):
        '''return the list of currently active G-codes'''
        self.e.poll()
        return self.s.gcodes

    def get_current_system(self):
        '''return the active coordinate system index (1 for G54, ...)'''
        g = self.active_codes()
        for i in g:
            if i >= 540 and i <= 590:
                # NOTE(review): `/` is true division in Python 3, so this
                # returns a float (e.g. 1.0) while the other branches return
                # ints — presumably `//` was intended; confirm against callers.
                return i/10 - 53
            elif i >= 590 and i <= 593:
                return i - 584
        return 1

    def open_program(self, filename):
        '''Open an nc file'''
        self.s.poll()
        self.set_mode(linuxcnc.MODE_AUTO)
        self.c.wait_complete()
        sleep(.25)
        self.c.program_open(filename)
        self.c.wait_complete()

    def run_full_program(self):
        '''Start a loaded program'''
        self.s.poll()
        self.c.auto(linuxcnc.AUTO_RUN, 0)
        self.c.wait_complete(self.timeout)
        return self.check_rcs_error()

    def set_feed_scale(self, scale):
        '''Assign a feed scale'''
        self.s.poll()
        self.c.feedrate(scale)
        self.c.wait_complete(self.timeout)

    def wait_on_program(self):
        '''Block until the running program finishes; retry on RCS errors.'''
        self.s.poll()
        # NOTE(review): `and` binds tighter than `or`, so this parses as
        # `exec != DONE or (state != DONE and task_state == ON)` — verify
        # that grouping is the intended loop condition.
        while self.s.exec_state != linuxcnc.EXEC_DONE or self.s.state != linuxcnc.RCS_DONE and self.s.task_state == linuxcnc.STATE_ON:
            sleep(.25)
            self.s.poll()
        if self.s.task_state != linuxcnc.STATE_ON:
            return False
        if self.check_rcs_error():
            print("Found RCS error while waiting, running again")
            self.run_full_program()
        return True

    def check_rcs_error(self):
        '''return True if the RCS state machine reports an error'''
        self.s.poll()
        if self.s.state == linuxcnc.RCS_ERROR:
            print("detected RCS error")
            return True
        return False


def introspect():
    '''Dump HAL pins of the python-ui component for debugging.'''
    os.system("halcmd show pin python-ui")
5,692
has headers
# Type stubs for distutils.dist (typeshed style): declarations only, no runtime code.
from _typeshed import FileDescriptorOrPath, Incomplete, SupportsWrite
from collections.abc import Iterable, Mapping
from distutils.cmd import Command
from re import Pattern
from typing import IO, Any, ClassVar, TypeVar, overload
from typing_extensions import TypeAlias

command_re: Pattern[str]

_OptionsList: TypeAlias = list[tuple[str, str | None, str, int] | tuple[str, str | None, str]]
_CommandT = TypeVar("_CommandT", bound=Command)

class DistributionMetadata:
    def __init__(self, path: FileDescriptorOrPath | None = None) -> None: ...
    name: str | None
    version: str | None
    author: str | None
    author_email: str | None
    maintainer: str | None
    maintainer_email: str | None
    url: str | None
    license: str | None
    description: str | None
    long_description: str | None
    keywords: str | list[str] | None
    platforms: str | list[str] | None
    classifiers: str | list[str] | None
    download_url: str | None
    provides: list[str] | None
    requires: list[str] | None
    obsoletes: list[str] | None
    def read_pkg_file(self, file: IO[str]) -> None: ...
    def write_pkg_info(self, base_dir: str) -> None: ...
    def write_pkg_file(self, file: SupportsWrite[str]) -> None: ...
    def get_name(self) -> str: ...
    def get_version(self) -> str: ...
    def get_fullname(self) -> str: ...
    def get_author(self) -> str: ...
    def get_author_email(self) -> str: ...
    def get_maintainer(self) -> str: ...
    def get_maintainer_email(self) -> str: ...
    def get_contact(self) -> str: ...
    def get_contact_email(self) -> str: ...
    def get_url(self) -> str: ...
    def get_license(self) -> str: ...
    def get_licence(self) -> str: ...
    def get_description(self) -> str: ...
    def get_long_description(self) -> str: ...
    def get_keywords(self) -> str | list[str]: ...
    def get_platforms(self) -> str | list[str]: ...
    def get_classifiers(self) -> str | list[str]: ...
    def get_download_url(self) -> str: ...
    def get_requires(self) -> list[str]: ...
    def set_requires(self, value: Iterable[str]) -> None: ...
    def get_provides(self) -> list[str]: ...
    def set_provides(self, value: Iterable[str]) -> None: ...
    def get_obsoletes(self) -> list[str]: ...
    def set_obsoletes(self, value: Iterable[str]) -> None: ...

class Distribution:
    cmdclass: dict[str, type[Command]]
    metadata: DistributionMetadata
    def __init__(self, attrs: Mapping[str, Any] | None = None) -> None: ...
    def get_option_dict(self, command: str) -> dict[str, tuple[str, str]]: ...
    def parse_config_files(self, filenames: Iterable[str] | None = None) -> None: ...
    def get_command_obj(self, command: str, create: bool = True) -> Command | None: ...
    global_options: ClassVar[_OptionsList]
    common_usage: ClassVar[str]
    display_options: ClassVar[_OptionsList]
    display_option_names: ClassVar[list[str]]
    negative_opt: ClassVar[dict[str, str]]
    verbose: int
    dry_run: int
    help: int
    command_packages: list[str] | None
    script_name: str | None
    script_args: list[str] | None
    command_options: dict[str, dict[str, tuple[str, str]]]
    dist_files: list[tuple[str, str, str]]
    packages: Incomplete
    package_data: dict[str, list[str]]
    package_dir: Incomplete
    py_modules: Incomplete
    libraries: Incomplete
    headers: Incomplete
    ext_modules: Incomplete
    ext_package: Incomplete
    include_dirs: Incomplete
    extra_path: Incomplete
    scripts: Incomplete
    data_files: Incomplete
    password: str
    command_obj: Incomplete
    have_run: Incomplete
    want_user_cfg: bool
    def dump_option_dicts(
        self, header: Incomplete | None = None, commands: Incomplete | None = None, indent: str = ""
    ) -> None: ...
    def find_config_files(self): ...
    commands: Incomplete
    def parse_command_line(self): ...
    def finalize_options(self) -> None: ...
    def handle_display_options(self, option_order): ...
    def print_command_list(self, commands, header, max_length) -> None: ...
    def print_commands(self) -> None: ...
    def get_command_list(self): ...
    def get_command_packages(self): ...
    def get_command_class(self, command: str) -> type[Command]: ...
    @overload
    def reinitialize_command(self, command: str, reinit_subcommands: bool = False) -> Command: ...
    @overload
    def reinitialize_command(self, command: _CommandT, reinit_subcommands: bool = False) -> _CommandT: ...
    def announce(self, msg, level: int = 2) -> None: ...
    def run_commands(self) -> None: ...
    def run_command(self, command: str) -> None: ...
    def has_pure_modules(self) -> bool: ...
    def has_ext_modules(self) -> bool: ...
    def has_c_libraries(self) -> bool: ...
    def has_modules(self) -> bool: ...
    def has_headers(self) -> bool: ...
    def has_scripts(self) -> bool: ...
    def has_data_files(self) -> bool: ...
    def is_pure(self) -> bool: ...
    # Getter methods generated in __init__
    def get_name(self) -> str: ...
    def get_version(self) -> str: ...
    def get_fullname(self) -> str: ...
    def get_author(self) -> str: ...
    def get_author_email(self) -> str: ...
    def get_maintainer(self) -> str: ...
    def get_maintainer_email(self) -> str: ...
    def get_contact(self) -> str: ...
    def get_contact_email(self) -> str: ...
    def get_url(self) -> str: ...
    def get_license(self) -> str: ...
    def get_licence(self) -> str: ...
    def get_description(self) -> str: ...
    def get_long_description(self) -> str: ...
    def get_keywords(self) -> str | list[str]: ...
    def get_platforms(self) -> str | list[str]: ...
    def get_classifiers(self) -> str | list[str]: ...
    def get_download_url(self) -> str: ...
    def get_requires(self) -> list[str]: ...
    def get_provides(self) -> list[str]: ...
    def get_obsoletes(self) -> list[str]: ...
5,693
test get input arguments set text method
"""Tests for the vsg notepad++ interface (fix method and input arguments)."""
import vsg
import vsg.interfaces.notepad_pp

import os
import re
import unittest

from vsg.tests import utils

lFile = []
utils.read_file(os.path.join(os.path.dirname(__file__), 'test_input.vhd'), lFile)
sFile = '\n'.join(lFile)

lFileSyntaxError = []
utils.read_file(os.path.join(os.path.dirname(__file__), 'test_input.syntax_error.vhd'), lFileSyntaxError)
sFileSyntaxError = '\n'.join(lFileSyntaxError)

lFileFixedStyleJcl = []
utils.read_file(os.path.join(os.path.dirname(__file__), 'test_input.fixed_jcl_style.vhd'), lFileFixedStyleJcl)

lFileFixedConfig1 = []
utils.read_file(os.path.join(os.path.dirname(__file__), 'test_input.fixed_config_1.vhd'), lFileFixedConfig1)

lFileFixedConfig2 = []
utils.read_file(os.path.join(os.path.dirname(__file__), 'test_input.fixed_config_2.vhd'), lFileFixedConfig2)

# Expected stdout of a clean fix run; the rule count is normalized to
# "replaced" by the regex below so the test is stable across rule additions.
lExpectedOutput = []
lExpectedOutput.append('================================================================================')
lExpectedOutput.append('File: None')
lExpectedOutput.append('================================================================================')
lExpectedOutput.append('Phase 7 of 7... Reporting')
lExpectedOutput.append('Total Rules Checked: replaced')
lExpectedOutput.append('Total Violations: 0')
lExpectedOutput.append(' Error : 0')
lExpectedOutput.append(' Warning : 0')
sExpectedOutput = '\n'.join(lExpectedOutput)
sExpectedOutput += '\n'

sExpectedSyntaxErrorOutput = ''
sExpectedSyntaxErrorOutput += '\n'
sExpectedSyntaxErrorOutput += 'Error: Unexpected token detected while parsing architecture_body @ Line 6, Column 1 in file None\n'
sExpectedSyntaxErrorOutput += '       Expecting : begin\n'
sExpectedSyntaxErrorOutput += '       Found     : end\n'

sExpectedConfigurationErrorOutput = ''
sExpectedConfigurationErrorOutput += 'ERROR: Invalid configuration of file None\n'
sExpectedConfigurationErrorOutput += 'ERROR [config-001] Rule architecture_002 has been deprecated.\n'
sExpectedConfigurationErrorOutput += '  Rule architecture_002 has been split into the following rules:\n'
sExpectedConfigurationErrorOutput += '  architecture_030\n'
sExpectedConfigurationErrorOutput += '  architecture_031\n'
sExpectedConfigurationErrorOutput += '  architecture_032\n'
sExpectedConfigurationErrorOutput += '  architecture_033\n'


class test_interface(unittest.TestCase):
    """Exercise the fix() entry point of the notepad++ interface."""

    def setUp(self):
        self.oInterface = vsg.interfaces.notepad_pp.New()

    def test_interface_exists(self):
        self.assertEqual('notepad++ interface', self.oInterface.identifier)

    def test_interface_fix_method(self):
        oInputArguments = self.oInterface.get_input_arguments()
        oInputArguments.set_text('LIBRARY ieee;')
        oResults = self.oInterface.fix(oInputArguments)
        sUpdatedText = oResults.get_text()
        self.assertEqual('library ieee;', sUpdatedText)

    def test_interface_fix_method_with_jcl_style(self):
        oInputArguments = self.oInterface.get_input_arguments()
        oInputArguments.set_text(sFile)
        oInputArguments.set_style('jcl')
        oResults = self.oInterface.fix(oInputArguments)
        sUpdatedText = oResults.get_text()
        self.assertEqual(lFileFixedStyleJcl, sUpdatedText.splitlines())
        sOutput = oResults.get_stdout()
        # Normalize the rule count so the expected output stays stable.
        sOutput = re.sub(r'Total Rules Checked: [0-9][0-9]*', r'Total Rules Checked: replaced', sOutput)
        self.assertEqual(sExpectedOutput, sOutput)
        self.assertFalse(oResults.has_violations())

    def test_interface_fix_method_with_jcl_style_with_violations(self):
        oInputArguments = self.oInterface.get_input_arguments()
        oInputArguments.set_text(sFile)
        oInputArguments.set_style('jcl')
        oInputArguments.add_configuration(os.path.join(os.path.dirname(__file__), 'config_violation.yaml'))
        oResults = self.oInterface.fix(oInputArguments)
        self.assertTrue(oResults.has_violations())

    def test_interface_fix_method_with_one_configuration(self):
        oInputArguments = self.oInterface.get_input_arguments()
        oInputArguments.set_text(sFile)
        oInputArguments.add_configuration(os.path.join(os.path.dirname(__file__), 'config_1.yaml'))
        oResults = self.oInterface.fix(oInputArguments)
        sUpdatedText = oResults.get_text()
        self.assertEqual(lFileFixedConfig1, sUpdatedText.splitlines())

    def test_interface_fix_method_with_two_configurations(self):
        oInputArguments = self.oInterface.get_input_arguments()
        oInputArguments.set_text(sFile)
        oInputArguments.add_configuration(os.path.join(os.path.dirname(__file__), 'config_1.yaml'))
        oInputArguments.add_configuration(os.path.join(os.path.dirname(__file__), 'config_2.yaml'))
        oResults = self.oInterface.fix(oInputArguments)
        sUpdatedText = oResults.get_text()
        self.assertEqual(lFileFixedConfig2, sUpdatedText.splitlines())

    def test_interface_fix_method_with_syntax_error(self):
        oInputArguments = self.oInterface.get_input_arguments()
        oInputArguments.set_text(sFileSyntaxError)
        oResults = self.oInterface.fix(oInputArguments)
        self.assertTrue(oResults.error_status())
        self.assertFalse(oResults.has_violations())
        # On error, the text must be returned unmodified.
        sUpdatedText = oResults.get_text()
        self.assertEqual(lFileSyntaxError, sUpdatedText.splitlines())
        sOutput = oResults.get_stdout()
        self.assertEqual(sExpectedSyntaxErrorOutput, sOutput)

    def test_interface_fix_method_with_configuration_error(self):
        oInputArguments = self.oInterface.get_input_arguments()
        oInputArguments.set_text(sFile)
        oInputArguments.add_configuration(os.path.join(os.path.dirname(__file__), 'config_error.yaml'))
        oResults = self.oInterface.fix(oInputArguments)
        self.assertTrue(oResults.error_status())
        self.assertFalse(oResults.has_violations())
        # On error, the text must be returned unmodified.
        sUpdatedText = oResults.get_text()
        self.assertEqual(lFile, sUpdatedText.splitlines())
        sOutput = oResults.get_stdout()
        self.assertEqual(sExpectedConfigurationErrorOutput, sOutput)


class test_input_arguments(unittest.TestCase):
    """Exercise the input-arguments object returned by the interface."""

    def setUp(self):
        self.oInterface = vsg.interfaces.notepad_pp.New()
        self.oInputArguments = self.oInterface.get_input_arguments()

    def test_get_input_arguments_set_text_method(self):
        self.assertIsNone(self.oInputArguments.text)
        self.oInputArguments.set_text('This is a test.')
        self.assertEqual('This is a test.', self.oInputArguments.text)
5,694
test sed returns proper command string with
# -*- coding: utf-8 -*-
"""
Testing of sed command.
"""

__author__ = 'Agnieszka Bylica'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = 'agnieszka.bylica@nokia.com'


import pytest

from moler.cmd.unix.sed import Sed
from moler.exceptions import CommandFailure


def test_sed_returns_proper_command_string(buffer_connection):
    sed_cmd = Sed(connection=buffer_connection.moler_connection, input_files=["old2"], options="-r",
                  scripts=["s/a/A/"])
    assert "sed -r -e 's/a/A/' old2" == sed_cmd.command_string


def test_sed_returns_proper_command_string_with_output_file(buffer_connection):
    sed_cmd = Sed(connection=buffer_connection.moler_connection, input_files=["old", "old2"],
                  scripts=["s/a/A/"], output_file="new")
    assert "sed -e 's/a/A/' old old2 > new" == sed_cmd.command_string


def test_sed_returns_proper_command_string_with_script_file(buffer_connection):
    sed_cmd = Sed(connection=buffer_connection.moler_connection, input_files=["old", "old2"],
                  script_files=["script"], output_file="new")
    assert "sed -f script old old2 > new" == sed_cmd.command_string


def test_sed_catches_command_failure(buffer_connection, command_output_and_expected_result_command_failure):
    command_output, expected_result = command_output_and_expected_result_command_failure
    buffer_connection.remote_inject_response([command_output])
    # Note: deliberately malformed script 's/a/A' (unterminated) to trigger failure.
    sed_cmd = Sed(connection=buffer_connection.moler_connection, input_files=["old", "old2"],
                  scripts=["s/a/A"])
    with pytest.raises(CommandFailure):
        sed_cmd()


def test_sed_catches_command_failure_empty_input_file(
        buffer_connection, command_output_and_expected_result_command_failure_empty_input_file):
    command_output, expected_result = command_output_and_expected_result_command_failure_empty_input_file
    buffer_connection.remote_inject_response([command_output])
    sed_cmd = Sed(connection=buffer_connection.moler_connection, input_files=["", " "],
                  scripts=["s/a/A/"])
    with pytest.raises(CommandFailure):
        sed_cmd()


def test_sed_catches_command_failure_no_script(buffer_connection,
                                               command_output_and_expected_result_command_failure_no_script):
    command_output, expected_result = command_output_and_expected_result_command_failure_no_script
    buffer_connection.remote_inject_response([command_output])
    sed_cmd = Sed(connection=buffer_connection.moler_connection, input_files=["old"],
                  scripts=["", " "], script_files=[" ", " "])
    assert "sed -e '' -e ' ' -f   -f   old" == sed_cmd.command_string
    with pytest.raises(CommandFailure):
        sed_cmd()


def test_sed_catches_option_error(buffer_connection, command_output_and_expected_result_option_error):
    command_output, expected_result = command_output_and_expected_result_option_error
    buffer_connection.remote_inject_response([command_output])
    sed_cmd = Sed(connection=buffer_connection.moler_connection, input_files=["old", "old2"],
                  options="-h", scripts=["s/a/A/"])
    with pytest.raises(CommandFailure):
        sed_cmd()


def test_sed_catches_file_error(buffer_connection, command_output_and_expected_result_file_error):
    command_output, expected_result = command_output_and_expected_result_file_error
    buffer_connection.remote_inject_response([command_output])
    sed_cmd = Sed(connection=buffer_connection.moler_connection, input_files=["old", "old3"],
                  scripts=["s/a/A/"])
    with pytest.raises(CommandFailure):
        sed_cmd()


@pytest.fixture
def command_output_and_expected_result_command_failure():
    data = """xyz@debian:~$ sed -e 's/a/A' old old2
sed: -e expression #1, char 5: unterminated `s' command
xyz@debian:~$"""
    result = dict()
    return data, result


@pytest.fixture
def command_output_and_expected_result_command_failure_empty_input_file():
    data = """xyz@debian:~$ sed -e 's/a/A'
xyz@debian:~$"""
    result = dict()
    return data, result


@pytest.fixture
def command_output_and_expected_result_command_failure_no_script():
    data = """xyz@debian:~$ sed -e '' -e ' ' -f   -f   old
sed: couldn't open file -f: No such file or directory
xyz@debian:~$"""
    result = dict()
    return data, result


@pytest.fixture
def command_output_and_expected_result_option_error():
    data = """xyz@debian:~$ sed -h -e 's/a/A/' old old2
sed: invalid option -- 'h'
Usage: sed [OPTION]... {script-only-if-no-other-script} [input-file]...

  -n, --quiet, --silent
                 suppress automatic printing of pattern space
  -e script, --expression=script
                 add the script to the commands to be executed
  -f script-file, --file=script-file
                 add the contents of script-file to the commands to be executed
  --follow-symlinks
                 follow symlinks when processing in place
  -i[SUFFIX], --in-place[=SUFFIX]
                 edit files in place (makes backup if SUFFIX supplied)
  -l N, --line-length=N
                 specify the desired line-wrap length for the `l' command
  --posix
                 disable all GNU extensions.
  -E, -r, --regexp-extended
                 use extended regular expressions in the script
                 (for portability use POSIX -E).
  -s, --separate
                 consider files as separate rather than as a single,
                 continuous long stream.
  --sandbox
                 operate in sandbox mode.
  -u, --unbuffered
                 load minimal amounts of data from the input files and flush
                 the output buffers more often
  -z, --null-data
                 separate lines by NUL characters
      --help     display this help and exit
      --version  output version information and exit

If no -e, --expression, -f, or --file option is given, then the first
non-option argument is taken as the sed script to interpret.  All
remaining arguments are names of input files; if no input files are
specified, then the standard input is read.

GNU sed home page: <http://www.gnu.org/software/sed/>.
General help using GNU software: <http://www.gnu.org/gethelp/>.
xyz@debian:~$"""
    result = dict()
    return data, result


@pytest.fixture
def command_output_and_expected_result_file_error():
    data = """xyz@debian:~$ sed -e 's/a/A/' old old3
Apple
peAr
plum
sed: can't read old3: No such file or directory
xyz@debian:~$"""
    result = dict()
    return data, result
5,695
get re org limit
# coding=utf-8 # Distributed under the MIT software license, see the accompanying # file LICENSE or http://www.opensource.org/licenses/mit-license.php. from typing import Optional from qrl.core import config from qrl.core.misc import logger, db from qrl.generated import qrl_pb2, qrlstateinfo_pb2 class State: def __init__(self, my_db=None): self._db = my_db if not my_db: self._db = db.DB() # generate db object here self._tmp_state = None # Temporary State file which needs to be fetched during migration to new db # Change State Version, each time any change made to leveldb structure self._state_version = config.dev.state_version def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): if self._db is not None: if self._db.db is not None: del self._db.db del self._db self._db = None @property def state_version(self): return self._state_version @property def batch(self): return self._db.get_batch() @property def total_coin_supply(self): try: return int.from_bytes(self._db.get_raw(b'total_coin_supply'), byteorder='big', signed=False) except KeyError: return 0 def get_state_version(self) -> int: try: version = self._db.get_raw(b'state_version') return int(version.decode()) except KeyError: return 0 except Exception: raise Exception("Exception while retrieving version") def put_state_version(self): try: self._db.put_raw(b'state_version', str(self._state_version).encode()) except Exception: raise Exception("Exception while Setting version") def is_older_state_version(self): current_state_version = self.get_state_version() if current_state_version < self._state_version: return True return False def is_state_compatible(self) -> bool: current_state_version = self.get_state_version() if current_state_version > self._state_version: logger.warning("You have a state with Version %s", current_state_version) logger.warning("This node only supports State Version %s", self._state_version) return False elif self.is_older_state_version(): logger.warning("Old State 
Version Found %s", current_state_version) return True def get_mainchain_height(self) -> int: try: return int.from_bytes(self._db.get_raw(b'blockheight'), byteorder='big', signed=False) except KeyError: pass except Exception as e: logger.error('get_blockheight Exception %s', e) return -1 def update_mainchain_height(self, height, batch): self._db.put_raw(b'blockheight', height.to_bytes(8, byteorder='big', signed=False), batch) def METHOD_NAME(self) -> int: try: return int.from_bytes(self._db.get_raw(b'reorg_limit'), byteorder='big', signed=False) except KeyError: return 0 except Exception as e: logger.error('get_re_org_limit Exception %s', e) return -1 def update_re_org_limit(self, height, batch): reorg_limit = height - config.dev.reorg_limit if reorg_limit <= 0: return current_reorg_limit = self.METHOD_NAME() if reorg_limit <= current_reorg_limit: return self._db.put_raw(b'reorg_limit', reorg_limit.to_bytes(8, byteorder='big', signed=False), batch) def get_address_is_used(self, address: bytes) -> bool: # FIXME: Probably obsolete try: return self._db.get_raw(address) except KeyError: return False except Exception as e: # FIXME: Review logger.error('Exception in address_used') logger.exception(e) raise def write_batch(self, batch, sync=True): self._db.write_batch(batch, sync) ######################################### ######################################### ######################################### ######################################### ######################################### def _update_total_coin_supply(self, balance, batch): self._db.put_raw(b'total_coin_supply', (self.total_coin_supply + balance).to_bytes(8, byteorder='big', signed=False), batch) def _delete(self, key, batch): self._db.delete(key, batch) def put_dev_config_state(self, dev_config, batch): self._db.put_raw(dev_config.current_state_key, dev_config.SerializeToString(), batch) def get_dev_config_state(self, dev_config_state_key: bytes): try: data = self._db.get_raw(dev_config_state_key) pbdata = 
qrl_pb2.DevConfig() pbdata.ParseFromString(bytes(data)) return pbdata except KeyError: logger.debug('[get_dev_config_state] Dev Config not found') except Exception as e: logger.error('[get_dev_config_state] %s', e) return None def get_dev_config_current_state_key(self): try: return self._db.get_raw(b'dev_config_current_state_key') except KeyError: logger.debug('[get_dev_config_current_state_key] Dev Config not found') except Exception as e: logger.error('[get_dev_config_current_state_key] %s', e) return None def put_dev_config_current_state_key(self, dev_config_state_key: bytes, batch): self._db.put_raw(b'dev_config_current_state_key', dev_config_state_key, batch) def put_fork_state(self, fork_state: qrlstateinfo_pb2.ForkState, batch=None): self._db.put_raw(b'fork_state', fork_state.SerializeToString(), batch) def get_fork_state(self) -> Optional[qrlstateinfo_pb2.ForkState]: try: data = self._db.get_raw(b'fork_state') fork_state = qrlstateinfo_pb2.ForkState() fork_state.ParseFromString(bytes(data)) return fork_state except KeyError: return None except Exception as e: logger.error('Exception in get_fork_state') logger.exception(e) raise def delete_fork_state(self, batch=None): self._db.delete(b'fork_state', batch) @staticmethod def generate_token_key(address, token_txhash) -> bytes: return b'token_' + address + b'_' + token_txhash @staticmethod def generate_slave_key(address, slave_pk) -> bytes: return b'slave_' + address + b'_' + slave_pk def get_slave_pk_access_type(self, address: bytes, slave_pk: bytes) -> qrl_pb2.SlaveMetadata: slave_key = self.generate_slave_key(address, slave_pk) try: slave_metadata = qrl_pb2.SlaveMetadata() slave_metadata.ParseFromString(self._db.get_raw(slave_key)) return slave_metadata except KeyError: pass except Exception as e: logger.error('[get_slave_pk_access_type] %s', e) return None def get_token(self, address: bytes, token_txhash: bytes) -> qrl_pb2.TokenBalance: try: token_balance = qrl_pb2.TokenBalance() 
token_balance.ParseFromString(self._db.get_raw(self.generate_token_key(address, token_txhash))) return token_balance except KeyError: pass except Exception as e: logger.error('[get_token] %s', e) return None
5,696
storage remove
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from ..azcopy.util import AzCopy, client_auth_for_azcopy, login_auth_for_azcopy, _generate_sas_token # pylint: disable=too-many-statements, too-many-locals, unused-argument def storage_copy(source, destination, put_md5=None, recursive=None, blob_type=None, preserve_s2s_access_tier=None, content_type=None, follow_symlinks=None, exclude_pattern=None, include_pattern=None, exclude_path=None, include_path=None, cap_mbps=None, extra_options=None, **kwargs): azcopy = AzCopy() flags = [] if recursive is not None: flags.append('--recursive') if put_md5 is not None: flags.append('--put-md5') if blob_type is not None: flags.append('--blob-type=' + blob_type) if preserve_s2s_access_tier is not None: flags.append('--s2s-preserve-access-tier=' + str(preserve_s2s_access_tier)) if include_pattern is not None: flags.append('--include-pattern=' + include_pattern) if exclude_pattern is not None: flags.append('--exclude-pattern=' + exclude_pattern) if include_path is not None: flags.append('--include-path=' + include_path) if exclude_path is not None: flags.append('--exclude-path=' + exclude_path) if content_type is not None: flags.append('--content-type=' + content_type) if follow_symlinks is not None: flags.append('--follow-symlinks=true') if cap_mbps is not None: flags.append('--cap-mbps=' + cap_mbps) if extra_options is not None: flags.extend(extra_options) azcopy.copy(source, destination, flags=flags) def METHOD_NAME(cmd, client, service, target, recursive=None, exclude_pattern=None, include_pattern=None, exclude_path=None, include_path=None): if service == 'file': azcopy = _azcopy_file_client(cmd, client) else: azcopy = 
_azcopy_blob_client(cmd, client) flags = [] if recursive is not None: flags.append('--recursive') if include_pattern is not None: flags.append('--include-pattern=' + include_pattern) if exclude_pattern is not None: flags.append('--exclude-pattern=' + exclude_pattern) if include_path is not None: flags.append('--include-path=' + include_path) if exclude_path is not None: flags.append('--exclude-path=' + exclude_path) if service == 'file': flags.append('--from-to=FileTrash') elif service == 'blob': flags.append('--from-to=BlobTrash') sas_token = client.sas_token if not sas_token and client.account_key: sas_token = _generate_sas_token(cmd, client.account_name, client.account_key, service=service, resource_types='co', permissions='rdl') azcopy.remove(_add_url_sas(target, sas_token), flags=flags) # pylint: disable=unused-argument def storage_fs_directory_copy(cmd, source, destination, recursive=None, **kwargs): azcopy = AzCopy() if kwargs.get('token_credential'): azcopy = _azcopy_login_client(cmd) flags = [] if recursive: flags.append('--recursive') azcopy.copy(source, destination, flags=flags) def storage_blob_sync(cmd, client, source, destination, delete_destination='true', exclude_pattern=None, include_pattern=None, exclude_path=None, extra_options=None): azcopy = _azcopy_blob_client(cmd, client) flags = [] if delete_destination is not None: flags.append('--delete-destination=' + delete_destination) if include_pattern is not None: flags.append('--include-pattern=' + include_pattern) if exclude_pattern is not None: flags.append('--exclude-pattern=' + exclude_pattern) if exclude_path is not None: flags.append('--exclude-path=' + exclude_path) if extra_options is not None: flags.extend(extra_options) sas_token = client.sas_token if not sas_token and client.account_key: sas_token = _generate_sas_token(cmd, client.account_name, client.account_key, service='blob', resource_types='co', permissions='rwdlac') azcopy.sync(source, _add_url_sas(destination, sas_token), 
flags=flags) def storage_run_command(cmd, command_args): if command_args.startswith('azcopy'): command_args = command_args[len('azcopy'):] azcopy = _azcopy_login_client(cmd) azcopy.run_command([command_args]) def _add_url_sas(url, sas): if not sas: return url return '{}?{}'.format(url, sas) def _azcopy_blob_client(cmd, client): return AzCopy(creds=client_auth_for_azcopy(cmd, client)) def _azcopy_file_client(cmd, client): return AzCopy(creds=client_auth_for_azcopy(cmd, client, service='file')) def _azcopy_login_client(cmd): return AzCopy(creds=login_auth_for_azcopy(cmd))
5,697
to balancer
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.utils.misc import reverse_dict from libcloud.common.cloudstack import CloudStackDriverMixIn from libcloud.loadbalancer.base import DEFAULT_ALGORITHM, Driver, Member, Algorithm, LoadBalancer from libcloud.loadbalancer.types import State, Provider class CloudStackLBDriver(CloudStackDriverMixIn, Driver): """Driver for CloudStack load balancers.""" api_name = "cloudstack_lb" name = "CloudStack" website = "http://cloudstack.org/" type = Provider.CLOUDSTACK _VALUE_TO_ALGORITHM_MAP = { "roundrobin": Algorithm.ROUND_ROBIN, "leastconn": Algorithm.LEAST_CONNECTIONS, } _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP) LB_STATE_MAP = { "Active": State.RUNNING, } def __init__( self, key, secret=None, secure=True, host=None, path=None, port=None, *args, **kwargs, ): """ @inherits: :class:`Driver.__init__` """ host = host if host else self.host path = path if path else self.path if path is not None: self.path = path if host is not None: self.host = host if (self.type == Provider.CLOUDSTACK) and (not host or not path): raise Exception( "When instantiating CloudStack driver directly " + "you also need to provide host and path argument" ) super().__init__(key=key, secret=secret, secure=secure, 
host=host, port=port) def list_protocols(self): """ We don't actually have any protocol awareness beyond TCP. :rtype: ``list`` of ``str`` """ return ["tcp"] def list_balancers(self): balancers = self._sync_request(command="listLoadBalancerRules", method="GET") balancers = balancers.get("loadbalancerrule", []) return [self.METHOD_NAME(balancer) for balancer in balancers] def get_balancer(self, balancer_id): balancer = self._sync_request( command="listLoadBalancerRules", params={"id": balancer_id}, method="GET" ) balancer = balancer.get("loadbalancerrule", []) if not balancer: raise Exception("no such load balancer: " + str(balancer_id)) return self.METHOD_NAME(balancer[0]) def create_balancer( self, name, members, protocol="http", port=80, algorithm=DEFAULT_ALGORITHM, location=None, private_port=None, network_id=None, vpc_id=None, ): """ @inherits: :class:`Driver.create_balancer` :param location: Location :type location: :class:`NodeLocation` :param private_port: Private port :type private_port: ``int`` :param network_id: The guest network this rule will be created for. 
:type network_id: ``str`` """ args = {} ip_args = {} if location is None: locations = self._sync_request(command="listZones", method="GET") location = locations["zone"][0]["id"] else: location = location.id if private_port is None: private_port = port if network_id is not None: args["networkid"] = network_id ip_args["networkid"] = network_id if vpc_id is not None: ip_args["vpcid"] = vpc_id ip_args.update({"zoneid": location, "networkid": network_id, "vpc_id": vpc_id}) result = self._async_request(command="associateIpAddress", params=ip_args, method="GET") public_ip = result["ipaddress"] args.update( { "algorithm": self._ALGORITHM_TO_VALUE_MAP[algorithm], "name": name, "privateport": private_port, "publicport": port, "publicipid": public_ip["id"], } ) result = self._sync_request(command="createLoadBalancerRule", params=args, method="GET") listbalancers = self._sync_request( command="listLoadBalancerRules", params=args, method="GET" ) listbalancers = [ rule for rule in listbalancers["loadbalancerrule"] if rule["id"] == result["id"] ] if len(listbalancers) != 1: return None balancer = self.METHOD_NAME(listbalancers[0]) for member in members: balancer.attach_member(member) return balancer def destroy_balancer(self, balancer): self._async_request( command="deleteLoadBalancerRule", params={"id": balancer.id}, method="GET" ) self._async_request( command="disassociateIpAddress", params={"id": balancer.ex_public_ip_id}, method="GET", ) def balancer_attach_member(self, balancer, member): member.port = balancer.ex_private_port self._async_request( command="assignToLoadBalancerRule", params={"id": balancer.id, "virtualmachineids": member.id}, method="GET", ) return True def balancer_detach_member(self, balancer, member): self._async_request( command="removeFromLoadBalancerRule", params={"id": balancer.id, "virtualmachineids": member.id}, method="GET", ) return True def balancer_list_members(self, balancer): members = self._sync_request( command="listLoadBalancerRuleInstances", 
params={"id": balancer.id}, method="GET", ) members = members["loadbalancerruleinstance"] return [self._to_member(m, balancer.ex_private_port, balancer) for m in members] def METHOD_NAME(self, obj): balancer = LoadBalancer( id=obj["id"], name=obj["name"], state=self.LB_STATE_MAP.get(obj["state"], State.UNKNOWN), ip=obj["publicip"], port=obj["publicport"], driver=self.connection.driver, ) balancer.ex_private_port = obj["privateport"] balancer.ex_public_ip_id = obj["publicipid"] return balancer def _to_member(self, obj, port, balancer): return Member(id=obj["id"], ip=obj["nic"][0]["ipaddress"], port=port, balancer=balancer)
5,698
test hash v2 partition key definition
# The MIT License (MIT) # Copyright (c) 2014 Microsoft Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import unittest import pytest import uuid import azure.cosmos.partition_key as partition_key import azure.cosmos.cosmos_client as cosmos_client import test_config pytestmark = pytest.mark.cosmosEmulator @pytest.mark.usefixtures("teardown") class PartitionKeyTests(unittest.TestCase): """Tests to verify if non partitioned collections are properly accessed on migration with version 2018-12-31. 
""" host = test_config._test_config.host masterKey = test_config._test_config.masterKey connectionPolicy = test_config._test_config.connectionPolicy @classmethod def tearDownClass(cls): cls.client.delete_database(test_config._test_config.TEST_DATABASE_ID) @classmethod def setUpClass(cls): cls.client = cosmos_client.CosmosClient(cls.host, cls.masterKey, consistency_level="Session", connection_policy=cls.connectionPolicy) cls.created_db = cls.client.create_database_if_not_exists(test_config._test_config.TEST_DATABASE_ID) cls.created_collection = cls.created_db.create_container_if_not_exists(id=test_config._test_config.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_ID, partition_key=partition_key.PartitionKey(path="/pk")) def test_multi_partition_collection_read_document_with_no_pk(self): document_definition = {'id': str(uuid.uuid4())} self.created_collection.create_item(body=document_definition) read_item = self.created_collection.read_item(item=document_definition['id'], partition_key=partition_key.NonePartitionKeyValue) self.assertEqual(read_item['id'], document_definition['id']) self.created_collection.delete_item(item=document_definition['id'], partition_key=partition_key.NonePartitionKeyValue) def METHOD_NAME(self): created_container = self.created_db.create_container( id='container_with_pkd_v2' + str(uuid.uuid4()), partition_key=partition_key.PartitionKey(path="/id", kind="Hash") ) created_container_properties = created_container.read() self.assertEqual(created_container_properties['partitionKey']['version'], 2) self.created_db.delete_container(created_container) created_container = self.created_db.create_container( id='container_with_pkd_v2' + str(uuid.uuid4()), partition_key=partition_key.PartitionKey(path="/id", kind="Hash", version=2) ) created_container_properties = created_container.read() self.assertEqual(created_container_properties['partitionKey']['version'], 2) self.created_db.delete_container(created_container) def 
test_hash_v1_partition_key_definition(self): created_container = self.created_db.create_container( id='container_with_pkd_v2' + str(uuid.uuid4()), partition_key=partition_key.PartitionKey(path="/id", kind="Hash", version=1) ) created_container_properties = created_container.read() self.assertEqual(created_container_properties['partitionKey']['version'], 1) self.created_db.delete_container(created_container)
5,699
test not queued outside request response cycle
import datetime import importlib import time from datetime import timedelta from unittest import mock from django.conf import settings from django.core.signals import request_finished, request_started from django.test.testcases import TransactionTestCase from celery import group from post_request_task.task import _discard_tasks, _stop_queuing_tasks from olympia.amo.celery import app, create_chunked_tasks_signatures, task from olympia.amo.tests import TestCase from olympia.amo.utils import utc_millesecs_from_epoch fake_task_func = mock.Mock() def test_celery_routes_in_queues(): queues_in_queues = {q.name for q in settings.CELERY_TASK_QUEUES} # check the default queue is defined in CELERY_QUEUES assert settings.CELERY_TASK_DEFAULT_QUEUE in queues_in_queues queues_in_routes = {c['queue'] for c in settings.CELERY_TASK_ROUTES.values()} assert queues_in_queues == queues_in_routes def test_celery_routes_only_contain_valid_tasks(): # Import CELERY_IMPORTS like celery would to find additional tasks that # are not automatically imported at startup otherwise. for module_name in settings.CELERY_IMPORTS: importlib.import_module(module_name) # Force a re-discovery of the tasks - when running the tests the # autodiscovery might happen too soon. app.autodiscover_tasks(force=True) # Make sure all tasks in CELERY_TASK_ROUTES are known. known_tasks = app.tasks.keys() for task_name in settings.CELERY_TASK_ROUTES.keys(): assert task_name in known_tasks # Make sure all known tasks have an explicit route set. 
for task_name in known_tasks: assert task_name in settings.CELERY_TASK_ROUTES.keys() def test_create_chunked_tasks_signatures(): items = list(range(0, 6)) batch = create_chunked_tasks_signatures(fake_task, items, 2) assert isinstance(batch, group) assert len(batch) == 3 assert batch.tasks[0] == fake_task.si([items[0], items[1]]) assert batch.tasks[1] == fake_task.si([items[2], items[3]]) assert batch.tasks[2] == fake_task.si([items[4], items[5]]) batch = create_chunked_tasks_signatures( fake_task, items, 3, task_args=('foo', 'bar'), task_kwargs={'some': 'kwarg'}, ) assert isinstance(batch, group) assert len(batch) == 2 assert batch.tasks[0] == fake_task.si( [items[0], items[1], items[2]], 'foo', 'bar', some='kwarg' ) assert batch.tasks[1] == fake_task.si( [items[3], items[4], items[5]], 'foo', 'bar', some='kwarg' ) @task(ignore_result=False) def fake_task_with_result(): fake_task_func() return 'foobar' @task def fake_task(*args, **kwargs): fake_task_func() return 'foobar' @task(track_started=True, ignore_result=False) def sleeping_task(time_to_sleep): time.sleep(time_to_sleep) class TestCeleryWorker(TestCase): @mock.patch('olympia.amo.celery.cache') def test_start_task_timer(self, celery_cache): result = fake_task_with_result.delay() result.get() assert celery_cache.set.called assert celery_cache.set.call_args[0][0] == f'task_start_time.{result.id}' @mock.patch('olympia.amo.celery.cache') @mock.patch('olympia.amo.celery.statsd') def test_track_run_time(self, celery_statsd, celery_cache): minute_ago = datetime.datetime.now() - timedelta(minutes=1) task_start = utc_millesecs_from_epoch(minute_ago) celery_cache.get.return_value = task_start result = fake_task_with_result.delay() result.get() approx_run_time = utc_millesecs_from_epoch() - task_start assert ( celery_statsd.timing.call_args[0][0] == 'tasks.olympia.amo.tests.test_celery.fake_task_with_result' ) actual_run_time = celery_statsd.timing.call_args[0][1] fuzz = 2000 # 2 seconds assert actual_run_time >= 
(approx_run_time - fuzz) and actual_run_time <= ( approx_run_time + fuzz ) assert celery_cache.get.call_args[0][0] == f'task_start_time.{result.id}' assert celery_cache.delete.call_args[0][0] == f'task_start_time.{result.id}' @mock.patch('olympia.amo.celery.cache') @mock.patch('olympia.amo.celery.statsd') def test_handle_cache_miss_for_stats(self, celery_cache, celery_statsd): celery_cache.get.return_value = None # cache miss fake_task.delay() assert not celery_statsd.timing.called class TestTaskQueued(TransactionTestCase): """Test that tasks are queued and only triggered when a request finishes. Tests our integration with django-post-request-task. """ def setUp(self): super().setUp() fake_task_func.reset_mock() _discard_tasks() def tearDown(self): super().tearDown() fake_task_func.reset_mock() _discard_tasks() _stop_queuing_tasks() def METHOD_NAME(self): fake_task.delay() assert fake_task_func.call_count == 1 def test_queued_inside_request_response_cycle(self): request_started.send(sender=self) fake_task.delay() assert fake_task_func.call_count == 0 request_finished.send_robust(sender=self) assert fake_task_func.call_count == 1 def test_no_dedupe_outside_request_response_cycle(self): fake_task.delay() fake_task.delay() assert fake_task_func.call_count == 2 def test_dedupe_inside_request_response_cycle(self): request_started.send(sender=self) fake_task.delay() fake_task.delay() assert fake_task_func.call_count == 0 request_finished.send_robust(sender=self) assert fake_task_func.call_count == 1