id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
298,500 | cancel | # twisted is optional and self-contained in this module.
# We don't want to force it as a dependency but that means we also can't test it with type-checkers given the current setup.
from _typeshed import Incomplete
from typing import Generic, NamedTuple, TypeVar
import pika.connection
from pika.adapters.utils import nbio_interface
from twisted.internet.base import DelayedCall # type: ignore[import] # pyright: ignore[reportMissingImports]
from twisted.internet.defer import Deferred, DeferredQueue # type: ignore[import] # pyright: ignore[reportMissingImports]
from twisted.internet.interfaces import ITransport # type: ignore[import] # pyright: ignore[reportMissingImports]
from twisted.internet.protocol import Protocol # type: ignore[import] # pyright: ignore[reportMissingImports]
from twisted.python.failure import Failure # type: ignore[import] # pyright: ignore[reportMissingImports]
_T = TypeVar("_T")
LOGGER: Incomplete
class ClosableDeferredQueue(DeferredQueue[_T], Generic[_T]):  # pyright: ignore[reportUntypedBaseClass]
    """A ``DeferredQueue`` variant that can be closed with a reason."""
    # Reason the queue was closed with; None while the queue is still open.
    closed: Failure | BaseException | None
    def __init__(self, size: Incomplete | None = ..., backlog: Incomplete | None = ...) -> None: ...
    # Returns a Deferred with an error if fails. None if success
    def put(self, obj: _T) -> Deferred[Failure | BaseException] | None: ...  # type: ignore[override]
    def get(self) -> Deferred[Failure | BaseException | _T]: ...  # type: ignore[override]
    # Pending items/waiters (exact type not visible from this stub).
    pending: Incomplete
    def close(self, reason: BaseException | None) -> None: ...
class ReceivedMessage(NamedTuple):
    """A received AMQP message as a ``(channel, method, properties, body)`` tuple."""
    channel: Incomplete
    method: Incomplete
    properties: Incomplete
    body: Incomplete
class TwistedChannel:
    """Wrapper around a pika channel whose operations return twisted ``Deferred`` objects."""
    # Fires when the channel closes; carries the close reason, if any.
    on_closed: Deferred[Incomplete | Failure | BaseException | None]
    def __init__(self, channel) -> None: ...
    @property
    def channel_number(self): ...
    @property
    def connection(self): ...
    @property
    def is_closed(self): ...
    @property
    def is_closing(self): ...
    @property
    def is_open(self): ...
    @property
    def flow_active(self): ...
    @property
    def consumer_tags(self): ...
    def callback_deferred(self, deferred, replies) -> None: ...
    def add_on_return_callback(self, callback): ...
    # Basic.* AMQP operations
    def basic_ack(self, delivery_tag: int = ..., multiple: bool = ...): ...
    def basic_cancel(self, consumer_tag: str = ...) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    def basic_consume(
        self,
        queue,
        auto_ack: bool = ...,
        exclusive: bool = ...,
        consumer_tag: Incomplete | None = ...,
        arguments: Incomplete | None = ...,
    ) -> Deferred[Incomplete | Failure | BaseException]: ...
    def basic_get(self, queue, auto_ack: bool = ...) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    def basic_nack(self, delivery_tag: Incomplete | None = ..., multiple: bool = ..., requeue: bool = ...): ...
    def basic_publish(
        self, exchange, routing_key, body, properties: Incomplete | None = ..., mandatory: bool = ...
    ) -> Deferred[Incomplete | Failure | BaseException]: ...
    def basic_qos(
        self, prefetch_size: int = ..., prefetch_count: int = ..., global_qos: bool = ...
    ) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    def basic_reject(self, delivery_tag, requeue: bool = ...): ...
    def basic_recover(self, requeue: bool = ...) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    def close(self, reply_code: int = ..., reply_text: str = ...): ...
    def confirm_delivery(self) -> Deferred[Incomplete | None]: ...
    # Exchange.* AMQP operations
    def exchange_bind(
        self, destination, source, routing_key: str = ..., arguments: Incomplete | None = ...
    ) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    def exchange_declare(
        self,
        exchange,
        exchange_type=...,
        passive: bool = ...,
        durable: bool = ...,
        auto_delete: bool = ...,
        internal: bool = ...,
        arguments: Incomplete | None = ...,
    ) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    def exchange_delete(
        self, exchange: Incomplete | None = ..., if_unused: bool = ...
    ) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    def exchange_unbind(
        self,
        destination: Incomplete | None = ...,
        source: Incomplete | None = ...,
        routing_key: str = ...,
        arguments: Incomplete | None = ...,
    ) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    def flow(self, active) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    def open(self): ...
    # Queue.* AMQP operations
    def queue_bind(
        self, queue, exchange, routing_key: Incomplete | None = ..., arguments: Incomplete | None = ...
    ) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    def queue_declare(
        self,
        queue,
        passive: bool = ...,
        durable: bool = ...,
        exclusive: bool = ...,
        auto_delete: bool = ...,
        arguments: Incomplete | None = ...,
    ) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    def queue_delete(
        self, queue, if_unused: bool = ..., if_empty: bool = ...
    ) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    def queue_purge(self, queue) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    def queue_unbind(
        self, queue, exchange: Incomplete | None = ..., routing_key: Incomplete | None = ..., arguments: Incomplete | None = ...
    ) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    # Tx.* AMQP transaction operations
    def tx_commit(self) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    def tx_rollback(self) -> Deferred[Incomplete | Failure | BaseException | None]: ...
    def tx_select(self) -> Deferred[Incomplete | Failure | BaseException | None]: ...
class _TwistedConnectionAdapter(pika.connection.Connection):
    """Private adapter wiring pika's ``Connection`` into a twisted transport/reactor."""
    def __init__(self, parameters, on_open_callback, on_open_error_callback, on_close_callback, custom_reactor) -> None: ...
    # twisted transport callbacks
    def connection_made(self, transport: ITransport) -> None: ...
    def connection_lost(self, error: Exception) -> None: ...
    def data_received(self, data) -> None: ...
class TwistedProtocolConnection(Protocol):  # pyright: ignore[reportUntypedBaseClass]
    """Twisted ``Protocol`` exposing a pika connection over a twisted transport."""
    # Deferred that presumably fires once the connection is usable — confirm against implementation.
    ready: Deferred[None] | None
    # Deferred that fires on close, or the close reason once fired.
    closed: Deferred[None] | Failure | BaseException | None
    def __init__(self, parameters: Incomplete | None = ..., custom_reactor: Incomplete | None = ...) -> None: ...
    def channel(self, channel_number: Incomplete | None = ...): ...
    @property
    def is_open(self): ...
    @property
    def is_closed(self): ...
    def close(self, reply_code: int = ..., reply_text: str = ...) -> Deferred[None] | Failure | BaseException | None: ...
    # twisted Protocol callbacks
    def dataReceived(self, data) -> None: ...
    def connectionLost(self, reason: Failure | BaseException = ...) -> None: ...
    def makeConnection(self, transport: ITransport) -> None: ...
    def connectionReady(self): ...
class _TimerHandle(nbio_interface.AbstractTimerReference):
    """Timer reference wrapping a twisted ``DelayedCall``.

    Restored the masked method name: ``AbstractTimerReference`` requires
    ``cancel()``, and that is the only abstract method left unimplemented here.
    """
    def __init__(self, handle: DelayedCall) -> None: ...
    def cancel(self) -> None: ...
298,501 | visit childof | # Generated from ECLsubset.g4 by ANTLR 4.10.1
from antlr4 import *
if __name__ is not None and "." in __name__:
from .ECLsubsetParser import ECLsubsetParser
else:
from ECLsubsetParser import ECLsubsetParser
# This class defines a complete generic visitor for a parse tree produced by ECLsubsetParser.
class ECLsubsetVisitor(ParseTreeVisitor):
# Visit a parse tree produced by ECLsubsetParser#expressionconstraint.
def visitExpressionconstraint(
self, ctx: ECLsubsetParser.ExpressionconstraintContext
):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#compoundexpressionconstraint.
def visitCompoundexpressionconstraint(
self, ctx: ECLsubsetParser.CompoundexpressionconstraintContext
):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#disjunctionexpressionconstraint.
def visitDisjunctionexpressionconstraint(
self, ctx: ECLsubsetParser.DisjunctionexpressionconstraintContext
):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#exclusionexpressionconstraint.
def visitExclusionexpressionconstraint(
self, ctx: ECLsubsetParser.ExclusionexpressionconstraintContext
):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#subexpressionconstraint.
def visitSubexpressionconstraint(
self, ctx: ECLsubsetParser.SubexpressionconstraintContext
):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#eclfocusconcept.
def visitEclfocusconcept(self, ctx: ECLsubsetParser.EclfocusconceptContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#eclconceptreference.
def visitEclconceptreference(self, ctx: ECLsubsetParser.EclconceptreferenceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#conceptid.
def visitConceptid(self, ctx: ECLsubsetParser.ConceptidContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#constraintoperator.
def visitConstraintoperator(self, ctx: ECLsubsetParser.ConstraintoperatorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#descendantof.
def visitDescendantof(self, ctx: ECLsubsetParser.DescendantofContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#descendantorselfof.
def visitDescendantorselfof(self, ctx: ECLsubsetParser.DescendantorselfofContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#childof.
def visitChildof(self, ctx: ECLsubsetParser.ChildofContext):
    # Restored the masked name: ANTLR-generated visitors are named
    # visit<RuleName>, and this rule is #childof (see comment above).
    # Default behaviour delegates to the children, like every sibling method.
    return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#ancestorof.
def visitAncestorof(self, ctx: ECLsubsetParser.AncestorofContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#ancestororselfof.
def visitAncestororselfof(self, ctx: ECLsubsetParser.AncestororselfofContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#parentof.
def visitParentof(self, ctx: ECLsubsetParser.ParentofContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#disjunction.
def visitDisjunction(self, ctx: ECLsubsetParser.DisjunctionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#exclusion.
def visitExclusion(self, ctx: ECLsubsetParser.ExclusionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#sctid.
def visitSctid(self, ctx: ECLsubsetParser.SctidContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#ws.
def visitWs(self, ctx: ECLsubsetParser.WsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#mws.
def visitMws(self, ctx: ECLsubsetParser.MwsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#comment.
def visitComment(self, ctx: ECLsubsetParser.CommentContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#nonstarchar.
def visitNonstarchar(self, ctx: ECLsubsetParser.NonstarcharContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#starwithnonfslash.
def visitStarwithnonfslash(self, ctx: ECLsubsetParser.StarwithnonfslashContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#nonfslash.
def visitNonfslash(self, ctx: ECLsubsetParser.NonfslashContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#sp.
def visitSp(self, ctx: ECLsubsetParser.SpContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#htab.
def visitHtab(self, ctx: ECLsubsetParser.HtabContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#cr.
def visitCr(self, ctx: ECLsubsetParser.CrContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#lf.
def visitLf(self, ctx: ECLsubsetParser.LfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#qm.
def visitQm(self, ctx: ECLsubsetParser.QmContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#bs.
def visitBs(self, ctx: ECLsubsetParser.BsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#digit.
def visitDigit(self, ctx: ECLsubsetParser.DigitContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#zero.
def visitZero(self, ctx: ECLsubsetParser.ZeroContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#digitnonzero.
def visitDigitnonzero(self, ctx: ECLsubsetParser.DigitnonzeroContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#nonwsnonpipe.
def visitNonwsnonpipe(self, ctx: ECLsubsetParser.NonwsnonpipeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#anynonescapedchar.
def visitAnynonescapedchar(self, ctx: ECLsubsetParser.AnynonescapedcharContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ECLsubsetParser#escapedchar.
def visitEscapedchar(self, ctx: ECLsubsetParser.EscapedcharContext):
return self.visitChildren(ctx)
del ECLsubsetParser |
298,502 | process | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import StringProperty, EnumProperty, BoolProperty
from sverchok.utils.sv_bmesh_utils import *
import numpy as np
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
from sverchok.utils.handle_blender_data import keep_enum_reference
class SvUVtextureNode(SverchCustomTreeNode, bpy.types.Node):
''' UV texture node '''
bl_idname = 'SvUVtextureNode'
bl_label = 'UVtextures'
bl_icon = 'MATERIAL'
is_animation_dependent = True
is_scene_dependent = True
def sv_init(self, context):
    # Socket layout: one object input; vertices and polygons outputs.
    self.inputs.new('SvObjectSocket', "Object")
    self.outputs.new('SvVerticesSocket', "Verts")
    self.outputs.new('SvStringsSocket', "Pols")
@keep_enum_reference
def avail_objects(self, context):
    """Enum items callback: names of the objects linked into the Object input.

    Added ``@keep_enum_reference`` for consistency with ``avail_uvs`` below:
    Blender does not keep references to strings returned by dynamic enum-item
    callbacks, so unreferenced item strings can be garbage-collected and
    corrupt the enum.
    """
    items = [('', '', '')]
    if self.inputs and self.inputs[0].is_linked:
        objects = self.inputs[0].sv_get()
        items = [(obj.name, obj.name, '') for obj in objects]
    return items
@keep_enum_reference
def avail_uvs(self, context):
    # Enum items callback: UV layer names of the currently selected object.
    # keep_enum_reference holds the item strings so Blender's enum does not
    # lose them to garbage collection.
    items = [('','','')]
    if self.inputs and self.inputs[0].is_linked:
        obj = bpy.data.objects[self.objects]
        if obj.data.uv_layers:
            items = [(p.name, p.name, "") for p in obj.data.uv_layers]
    return items
objects: EnumProperty(items=avail_objects, name="Objects",
description="Choose Objects", update=updateNode)
uv: EnumProperty(items=avail_uvs, name="UV",
description="Choose UV to load", update=updateNode)
def sv_draw_buttons(self, context, layout):
    # UI: let the user pick which UV layer to load.
    layout.prop(self, 'uv', text='uv')
def UV(self, object, uv):
    """Build Sverchok verts/polys from the object's UV layer *uv*, scaled so
    the unwrapped mesh keeps the original mesh's mean face area."""
    # makes UV from layout texture area to sverchok vertices and polygons.
    mesh = object.data
    bm = bmesh.new()
    bm.from_mesh(mesh)
    # assumes *uv* names an existing UV layer — caller (process) guards on self.uv
    uv_layer = bm.loops.layers.uv[uv]
    nFaces = len(bm.faces)
    bm.verts.ensure_lookup_table()
    bm.faces.ensure_lookup_table()
    vertices_dict = {}
    polygons_new = []
    areas = []
    for fi in range(nFaces):
        polygons_new_pol = []
        areas.append(bm.faces[fi].calc_area())
        for loop in bm.faces[fi].loops:
            li = loop.index
            polygons_new_pol.append(li)
            # loop index -> [u, v, 0]; the UV plane is flattened at z = 0
            vertices_dict[li] = list(loop[uv_layer].uv[:])+[0]
        polygons_new.append(polygons_new_pol)
    vertices_new = [i for i in vertices_dict.values()]
    # rebuild a mesh from the UV coordinates to measure the unwrapped face areas
    bm_roll = bmesh_from_pydata(verts=vertices_new,edges=[],faces=polygons_new)
    bm_roll.verts.ensure_lookup_table()
    bm_roll.faces.ensure_lookup_table()
    areas_roll = []
    for fi in range(nFaces):
        areas_roll.append(bm_roll.faces[fi].calc_area())
    np_area_origin = np.array(areas).mean()
    np_area_roll = np.array(areas_roll).mean()
    # scale factor so the mean unwrapped area matches the mean original area
    mult = np.sqrt(np_area_origin/np_area_roll)
    np_ver = np.array(vertices_new)
    # debug: (print(np_area_origin, np_area_roll, mult, 'orig area, unwrapped area, multiplier'))
    vertices_new = (np_ver*mult).tolist()
    # free bmesh data explicitly before returning
    bm.clear()
    del bm
    bm_roll.clear()
    del bm_roll
    return [vertices_new], [polygons_new]
def process(self):
    """Sverchok node entry point (restored masked name): read the chosen UV
    layer from the selected object and push verts/polys to the outputs."""
    if self.inputs and self.inputs[0].is_linked:
        obj = bpy.data.objects[self.objects]
        if not self.uv:
            # No UV layer chosen/available: warn and emit an empty result.
            print('!!! for node:', self.name, '!!! object', self.objects, 'have no UV')
            if self.outputs and self.outputs[0].is_linked:
                self.outputs[0].sv_set([[]])
            return
        uv = self.uv
        v, p = self.UV(obj, uv)
        # Only write sockets that are actually connected downstream.
        if self.outputs and self.outputs[0].is_linked:
            self.outputs[0].sv_set(v)
        if self.outputs and self.outputs[1].is_linked:
            self.outputs[1].sv_set(p)
def register():
    # Register the node class with Blender on add-on load.
    bpy.utils.register_class(SvUVtextureNode)
def unregister():
    # Unregister the node class from Blender on add-on unload.
    bpy.utils.unregister_class(SvUVtextureNode)
298,503 | visit arguments | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import json
from edb.common.ast import codegen
class GraphQLSourceGenerator(codegen.SourceGenerator):
    """Generate GraphQL source text from a graphql-core AST.

    Each ``visit_<Kind>Node`` method writes the textual form of the
    corresponding AST node using the base ``codegen.SourceGenerator``
    primitives (``write``, ``visit``, ``new_lines``, ``indentation``).

    Fixes applied: restored the masked helper name as ``_visit_arguments``
    (called from ``visit_FieldNode`` and ``visit_DirectiveNode``), and
    repaired ``visit_ObjectFieldNode`` which called the non-existent
    ``visit_Argument`` instead of ``visit_ArgumentNode``.
    """

    def generic_visit(self, node):
        # Fail loudly for node types without an explicit visit_* method.
        raise RuntimeError(
            'No method to generate code for %s' % node.__class__.__name__)

    def _visit_list(self, items, separator=None):
        # Visit every item, writing *separator* between consecutive items.
        for item in items:
            self.visit(item)
            if item is not items[-1] and separator:
                self.write(separator)

    def _visit_arguments(self, node):
        # Write a parenthesized, comma-separated argument list, if any.
        if node.arguments:
            self.write('(')
            self._visit_list(node.arguments, separator=', ')
            self.write(')')

    def _visit_directives(self, node):
        # Write the node's directives, if any, space-prefixed.
        if node.directives:
            self.write(' ')
            self._visit_list(node.directives, separator=', ')

    def _visit_type_condition(self, node):
        # Write an "on <Type>" condition, if present.
        if node.type_condition:
            self.write(' on ')
            self.visit(node.type_condition)

    def visit_NameNode(self, node):
        self.write(node.value)

    def visit_DocumentNode(self, node):
        self._visit_list(node.definitions)

    def visit_OperationDefinitionNode(self, node):
        if node.operation:
            self.write(node.operation)
        if node.name:
            self.write(' ')
            self.visit(node.name)
        if node.variable_definitions:
            self.write('(')
            self._visit_list(node.variable_definitions, separator=', ')
            self.write(')')
        self._visit_directives(node)
        self.visit(node.selection_set)

    def visit_FragmentDefinitionNode(self, node):
        self.write('fragment ')
        self.visit(node.name)
        self._visit_type_condition(node)
        self._visit_directives(node)
        self.visit(node.selection_set)

    def visit_SelectionSetNode(self, node):
        self.write('{')
        self.new_lines = 1
        self.indentation += 1
        self._visit_list(node.selections)
        self.indentation -= 1
        self.write('}')
        self.new_lines = 2

    def visit_FieldNode(self, node):
        if node.alias:
            self.visit(node.alias)
            self.write(': ')
        self.visit(node.name)
        self._visit_arguments(node)
        self._visit_directives(node)
        if node.selection_set:
            self.visit(node.selection_set)
        else:
            self.new_lines = 1

    def visit_FragmentSpreadNode(self, node):
        self.write('...')
        self.visit(node.name)
        self._visit_directives(node)
        self.new_lines = 1

    def visit_InlineFragmentNode(self, node):
        self.write('...')
        self._visit_type_condition(node)
        self._visit_directives(node)
        self.visit(node.selection_set)

    def visit_ArgumentNode(self, node):
        self.visit(node.name)
        self.write(': ')
        self.visit(node.value)

    def visit_ObjectFieldNode(self, node):
        # Object fields print like arguments ("name: value"); was calling the
        # non-existent ``visit_Argument`` (pre-*Node-suffix leftover).
        self.visit_ArgumentNode(node)
        self.new_lines = 1

    def visit_VariableDefinitionNode(self, node):
        self.visit(node.variable)
        self.write(': ')
        self.visit(node.type)
        if node.default_value:
            self.write(' = ')
            self.visit(node.default_value)

    def visit_DirectiveNode(self, node):
        self.write('@')
        self.visit(node.name)
        self._visit_arguments(node)

    def visit_StringValueNode(self, node):
        # the GQL string works same as JSON string
        self.write(json.dumps(node.value))

    def visit_IntValueNode(self, node):
        self.write(node.value)

    def visit_FloatValueNode(self, node):
        self.write(node.value)

    def visit_BooleanValueNode(self, node):
        if node.value:
            self.write('true')
        else:
            self.write('false')

    def visit_ListValueNode(self, node):
        self.write('[')
        self._visit_list(node.values, separator=', ')
        self.write(']')

    def visit_ObjectValueNode(self, node):
        if node.fields:
            self.write('{')
            self.new_lines = 1
            self.indentation += 1
            self._visit_list(node.fields)
            self.indentation -= 1
            self.write('}')
        else:
            self.write('{}')

    def visit_EnumValueNode(self, node):
        self.write(node.value)

    def visit_NullValueNode(self, node):
        self.write('null')

    def visit_VariableNode(self, node):
        self.write('$')
        self.visit(node.name)

    def visit_NamedTypeNode(self, node):
        self.visit(node.name)

    def visit_ListTypeNode(self, node):
        self.write('[')
        self.visit(node.type)
        self.write(']')

    def visit_NonNullTypeNode(self, node):
        self.visit(node.type)
        self.write('!')
generate_source = GraphQLSourceGenerator.to_source |
298,504 | handle vex expr gsptr | from typing import Optional
import logging
import claripy
import pyvex
from . import irop
from . import ccall
from ..light import VEXMixin
from .... import errors
from .... import sim_options as o
l = logging.getLogger(__name__)
zero = claripy.BVV(0, 32)
def value(ty, val, size: Optional[int] = None):
    """Build a concrete claripy value for the VEX type *ty*.

    F32/F64 types produce FPVs; everything else produces a BVV whose width is
    *size* when given, otherwise the width pyvex reports for *ty*.
    """
    if ty == "Ity_F32":
        return claripy.FPV(float(val), claripy.FSORT_FLOAT)
    if ty == "Ity_F64":
        return claripy.FPV(float(val), claripy.FSORT_DOUBLE)
    width = size if size is not None else pyvex.get_type_size(ty)
    return claripy.BVV(int(val), width)
def symbol(ty, name):
    """Build a symbolic claripy value named *name* for the VEX type *ty*.

    F32/F64 types produce FPSs; everything else produces a BVS sized per pyvex.
    """
    if ty == "Ity_F32":
        return claripy.FPS(name, claripy.FSORT_FLOAT)
    if ty == "Ity_F64":
        return claripy.FPS(name, claripy.FSORT_DOUBLE)
    return claripy.BVS(name, pyvex.get_type_size(ty))
class ClaripyDataMixin(VEXMixin):
"""
This mixin provides methods that makes the vex engine process guest code using claripy ASTs as the data domain.
"""
# util methods
def _is_true(self, v):
return claripy.is_true(v)
def _is_false(self, v):
return claripy.is_false(v)
def _optimize_guarded_addr(self, addr, guard):
    """If *addr* is an If-expression whose condition is the very same AST as
    *guard*'s condition, return addr's true branch; otherwise return *addr*
    unchanged."""
    # optimization: is the guard the same as the condition inside the address? if so, unpack the address and remove
    # the guarding condition.
    if (
        isinstance(guard, claripy.ast.Base)
        and guard.op == "If"
        and isinstance(addr, claripy.ast.Base)
        and addr.op == "If"
    ):
        # identity (`is`) comparison: only fires when both Ifs share one condition AST
        if guard.args[0] is addr.args[0]:
            # the address is guarded by the same guard! unpack the addr
            return addr.args[1]
    return addr
# consts
def _handle_vex_const(self, const):
return value(const.type, const.value)
# statements
def _perform_vex_stmt_LoadG(self, addr, alt, guard, dst, cvt, end):
addr = self._optimize_guarded_addr(addr, guard)
super()._perform_vex_stmt_LoadG(addr, alt, guard, dst, cvt, end)
def _perform_vex_stmt_StoreG(self, addr, data, guard, ty, endness, **kwargs):
addr = self._optimize_guarded_addr(addr, guard)
super()._perform_vex_stmt_StoreG(addr, data, guard, ty, endness, **kwargs)
def _handle_vex_expr_GSPTR(self, expr):
    """Handle the VEX GSPTR (guest-state pointer) expression.

    Restored masked name, matching the sibling ``_handle_vex_expr_VECRET``
    handler naming for VEX expression tags. Like VECRET and Binder below, it
    returns a 32-bit zero BVV rather than a real pointer.
    (Original author note: "is this right? do I care?")
    """
    return zero
def _handle_vex_expr_VECRET(self, expr):
return zero
def _handle_vex_expr_Binder(self, expr):
return zero
# simple wrappers to implement the fp/bv data casting
def _perform_vex_expr_Get(self, offset, ty, **kwargs):
res = super()._perform_vex_expr_Get(offset, ty, **kwargs)
if ty.startswith("Ity_F"):
return res.raw_to_fp()
else:
return res
def _perform_vex_expr_Load(self, addr, ty, endness, **kwargs):
res = super()._perform_vex_expr_Load(addr, ty, endness, **kwargs)
if ty.startswith("Ity_F"):
return res.raw_to_fp()
else:
return res
def _perform_vex_stmt_Put(self, offset, data, **kwargs):
super()._perform_vex_stmt_Put(offset, data.raw_to_bv(), **kwargs)
def _perform_vex_stmt_Store(self, addr, data, endness, **kwargs):
super()._perform_vex_stmt_Store(addr, data.raw_to_bv(), endness, **kwargs)
# op support
def _perform_vex_expr_ITE(self, cond, ifTrue, ifFalse):
try:
return claripy.If(cond != 0, ifTrue, ifFalse)
except claripy.ClaripyError as e:
raise errors.SimError("Claripy failed") from e
def _perform_vex_expr_Op(self, op, args):
# TODO: get rid of these hacks (i.e. state options and modes) and move these switches into engine properties
options = getattr(self.state, "options", {o.SUPPORT_FLOATING_POINT})
simop = irop.vexop_to_simop(
op, extended=o.EXTENDED_IROP_SUPPORT in options, fp=o.SUPPORT_FLOATING_POINT in options
)
return simop.calculate(*args)
# ccall support
def _perform_vex_expr_CCall(self, func_name, ty, args, func=None):
    """Evaluate a VEX clean-call by dispatching to the helper named
    *func_name* in the ccall module (or the explicitly supplied *func*)."""
    if func is None:
        try:
            func = getattr(ccall, func_name)
        except AttributeError as e:
            raise errors.UnsupportedCCallError(f"Unsupported ccall {func_name}") from e
    try:
        return func(self.state, *args)
    except ccall.CCallMultivaluedException as e:
        # The helper couldn't handle one symbolic argument; it reports which
        # AST to replace (`to_replace`) and the concrete cases to enumerate.
        cases, to_replace = e.args
        # pylint: disable=undefined-loop-variable
        # Locate the offending argument by identity; for/else raises if absent.
        for i, arg in enumerate(args):
            if arg is to_replace:
                break
        else:
            raise errors.UnsupportedCCallError("Trying to concretize a value which is not an argument")
        # Re-run the ccall once per concrete case and merge via an ite chain,
        # defaulting to zero of the result type.
        evaluated_cases = [(case, func(self.state, *args[:i], value_, *args[i + 1 :])) for case, value_ in cases]
        try:
            return claripy.ite_cases(evaluated_cases, value(ty, 0))
        except claripy.ClaripyError as ce:
            raise errors.SimOperationError("Claripy failed") from ce
298,505 | test job split | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import pytest
from facebook_business.api import FacebookAdsApiBatch
from source_facebook_marketing.api import MyFacebookAdsApi
from source_facebook_marketing.streams.async_job import InsightAsyncJob, ParentAsyncJob
from source_facebook_marketing.streams.async_job_manager import InsightAsyncJobManager
from source_facebook_marketing.streams.common import JobException
@pytest.fixture(name="api")
def api_fixture(mocker):
    # Mocked Facebook API: zero throttle and a MagicMock batch from new_batch().
    api = mocker.Mock()
    api.api.ads_insights_throttle = MyFacebookAdsApi.Throttle(0, 0)
    api.api.new_batch.return_value = mocker.MagicMock(spec=FacebookAdsApiBatch)
    return api
@pytest.fixture(name="time_mock")
def time_mock_fixture(mocker):
return mocker.patch("source_facebook_marketing.streams.async_job_manager.time")
@pytest.fixture(name="update_job_mock")
def update_job_mock_fixture(mocker):
return mocker.patch("source_facebook_marketing.streams.async_job_manager.update_in_batch")
class TestInsightAsyncManager:
def test_jobs_empty(self, api):
"""Should work event without jobs"""
manager = InsightAsyncJobManager(api=api, jobs=[])
jobs = list(manager.completed_jobs())
assert not jobs
def test_jobs_completed_immediately(self, api, mocker, time_mock):
"""Manager should emmit jobs without waiting if they completed"""
jobs = [
mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False),
mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False),
]
manager = InsightAsyncJobManager(api=api, jobs=jobs)
completed_jobs = list(manager.completed_jobs())
assert jobs == completed_jobs
time_mock.sleep.assert_not_called()
def test_jobs_wait(self, api, mocker, time_mock, update_job_mock):
"""Manager should return completed jobs and wait for others"""
def update_job_behaviour():
jobs[1].completed = True
yield
yield
jobs[0].completed = True
yield
update_job_mock.side_effect = update_job_behaviour()
jobs = [
mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False, completed=False),
mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False, completed=False),
]
manager = InsightAsyncJobManager(api=api, jobs=jobs)
job = next(manager.completed_jobs(), None)
assert job == jobs[1]
time_mock.sleep.assert_not_called()
job = next(manager.completed_jobs(), None)
assert job == jobs[0]
time_mock.sleep.assert_called_with(InsightAsyncJobManager.JOB_STATUS_UPDATE_SLEEP_SECONDS)
job = next(manager.completed_jobs(), None)
assert job is None
def test_job_restarted(self, api, mocker, time_mock, update_job_mock):
"""Manager should restart failed jobs"""
def update_job_behaviour():
jobs[1].failed = True
yield
jobs[1].failed = False
jobs[1].completed = True
yield
update_job_mock.side_effect = update_job_behaviour()
jobs = [
mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False, completed=True),
mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False, completed=False),
]
manager = InsightAsyncJobManager(api=api, jobs=jobs)
job = next(manager.completed_jobs(), None)
assert job == jobs[0]
jobs[1].restart.assert_called_once()
job = next(manager.completed_jobs(), None)
assert job == jobs[1]
job = next(manager.completed_jobs(), None)
assert job is None
def test_job_split(self, api, mocker, time_mock, update_job_mock):
    """Manager should split failed jobs when they fail second time"""
    # (Restored masked test name to match the suite's test_* naming and
    # this docstring's scenario.)

    def update_job_behaviour():
        # First status update marks job[1] as failed on its 2nd attempt,
        # which should trigger split_job() instead of a plain restart.
        jobs[1].failed = True
        jobs[1].attempt_number = 2
        yield from range(10)

    update_job_mock.side_effect = update_job_behaviour()
    jobs = [
        mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False, completed=True),
        mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False, completed=False),
    ]
    sub_jobs = [
        mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False, completed=True),
        mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False, completed=True),
    ]
    sub_jobs[0].get_result.return_value = [1, 2]
    sub_jobs[1].get_result.return_value = [3, 4]
    jobs[1].split_job.return_value = sub_jobs
    manager = InsightAsyncJobManager(api=api, jobs=jobs)

    # The healthy job comes out first; the failing one gets split.
    job = next(manager.completed_jobs(), None)
    assert job == jobs[0]
    jobs[1].split_job.assert_called_once()

    # The split pieces are wrapped in a ParentAsyncJob combining all results.
    job = next(manager.completed_jobs(), None)
    assert isinstance(job, ParentAsyncJob)
    assert list(job.get_result()) == [1, 2, 3, 4]

    job = next(manager.completed_jobs(), None)
    assert job is None
def test_job_failed_too_many_times(self, api, mocker, time_mock, update_job_mock):
"""Manager should fail when job failed too many times"""
def update_job_behaviour():
jobs[1].failed = True
jobs[1].attempt_number = InsightAsyncJobManager.MAX_NUMBER_OF_ATTEMPTS
yield from range(10)
update_job_mock.side_effect = update_job_behaviour()
jobs = [
mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False, completed=True),
mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False, completed=False),
]
manager = InsightAsyncJobManager(api=api, jobs=jobs)
with pytest.raises(JobException, match=f"{jobs[1]}: failed more than {InsightAsyncJobManager.MAX_NUMBER_OF_ATTEMPTS} times."):
next(manager.completed_jobs(), None)
def test_nested_job_failed_too_many_times(self, api, mocker, time_mock, update_job_mock):
"""Manager should fail when a nested job within a ParentAsyncJob failed too many times"""
def update_job_behaviour():
jobs[1].failed = True
sub_jobs[1].failed = True
sub_jobs[1].attempt_number = InsightAsyncJobManager.MAX_NUMBER_OF_ATTEMPTS
yield from range(10)
update_job_mock.side_effect = update_job_behaviour()
sub_jobs = [
mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False, completed=True),
mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False, completed=False),
]
jobs = [
mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False, completed=True),
mocker.Mock(spec=ParentAsyncJob, _jobs=sub_jobs, attempt_number=1, failed=False, completed=False),
]
manager = InsightAsyncJobManager(api=api, jobs=jobs)
with pytest.raises(JobException):
next(manager.completed_jobs(), None) |
298,506 | test convert quantizable conv integer no conv | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import onnx
from sparseml.exporters.transforms import ConvToConvIntegerAddCastMul
from sparsezoo.utils import validate_onnx
from tests.sparseml.exporters.transforms.test_onnx_transform import (
_create_model as _create_model_no_conv,
)
def _create_test_model():
    """
    Creates a test model with a convolution node and quantize/dequantize nodes

    | Starting with:
    |   INPUT             QuantizeLinear (with constant kernel)
    |     |                  |
    |   QuantizeLinear    DequantizeLinear
    |     |                  |
    |   DequantizeLinear     |
    |         |              |
    |          Conv (with bias)
    |            |
    |          OUTPUT
    """
    # Unit scales and zero point keep the quantization arithmetic trivial.
    x_scale = onnx.helper.make_tensor("x_scale", onnx.TensorProto.FLOAT, (1,), [1])
    y_scale = onnx.helper.make_tensor("y_scale", onnx.TensorProto.FLOAT, (1,), [1])
    zero_point = onnx.helper.make_tensor("zero_point", onnx.TensorProto.INT8, (1,), [1])
    bias = onnx.helper.make_tensor("bias", onnx.TensorProto.FLOAT, (3,), [1, 1, 1])
    input = onnx.helper.make_tensor_value_info("input", onnx.TensorProto.FLOAT, (3, 3))
    output = onnx.helper.make_tensor_value_info("output", onnx.TensorProto.FLOAT, (1,))
    # QDQ pair on the activation input.
    quantize_linear_node_0 = onnx.helper.make_node(
        "QuantizeLinear",
        ["input", "y_scale", "zero_point"],
        ["quant_linear_0_output"],
        name="quantize_linear_node_0",
    )
    dequantize_linear_node_0 = onnx.helper.make_node(
        "DequantizeLinear",
        ["quant_linear_0_output", "x_scale", "zero_point"],
        ["dequant_linear_0_output"],
        name="dequantize_linear_node_0",
    )
    # Constant 3x3 all-ones kernel, fed through its own QDQ pair.
    kernel = onnx.helper.make_tensor(
        "kernel", onnx.TensorProto.FLOAT, (3, 3), [1, 1, 1, 1, 1, 1, 1, 1, 1]
    )
    quantize_linear_node_1 = onnx.helper.make_node(
        "QuantizeLinear",
        ["kernel", "y_scale", "zero_point"],
        ["quant_linear_1_output"],
        name="quantize_linear_node_1",
    )
    dequantize_linear_node_1 = onnx.helper.make_node(
        "DequantizeLinear",
        ["quant_linear_1_output", "x_scale", "zero_point"],
        ["dequant_linear_1_output"],
        name="dequantize_linear_node_1",
    )
    conv_node = onnx.helper.make_node(
        "Conv",
        inputs=["dequant_linear_0_output", "dequant_linear_1_output", "bias"],
        outputs=["conv_node_output"],
        kernel_shape=[3, 3],
        name="conv_node",
    )
    # NOTE: node and initializer ordering here is asserted verbatim by the
    # tests below -- do not reorder.
    graph = onnx.helper.make_graph(
        nodes=[
            quantize_linear_node_0,
            dequantize_linear_node_0,
            quantize_linear_node_1,
            dequantize_linear_node_1,
            conv_node,
        ],
        name="g",
        inputs=[input],
        outputs=[output],
        initializer=[y_scale, x_scale, bias, zero_point, kernel],
    )
    model = onnx.helper.make_model(graph)
    validate_onnx(model)
    return model
def test_convert_quantizable_conv_integer():
    """QDQ Conv with constant kernel and bias is rewritten into the
    ConvInteger + bias-Add + Cast + rescale-Mul chain."""
    model = _create_test_model()
    transform = ConvToConvIntegerAddCastMul()
    model = transform(model)
    validate_onnx(model)
    # The input QuantizeLinear survives; the QDQ pairs and Conv collapse.
    assert [node.name for node in model.graph.node] == [
        "quantize_linear_node_0",
        "conv_node_quant",
        "conv_node_bias_add_quant",
        "conv_node_bias_add_quant_cast",
        "conv_node_bias_add_quant_rescale_mul",
    ]
    # Weight and bias are folded into pre-quantized initializers.
    assert [node.name for node in model.graph.initializer] == [
        "y_scale",
        "zero_point",
        "conv_node.weight_quantized",
        "conv_node_bias_add.bias_quantized",
        "conv_node_bias_add.bias_quantized.scale",
    ]
def METHOD_NAME():
    """The transform is a no-op on a model containing no quantizable Conv."""
    model_in = _create_model_no_conv()
    nodes_in = [node.name for node in model_in.graph.node]
    transform = ConvToConvIntegerAddCastMul()
    model_out = transform(model_in)
    validate_onnx(model_out)
    # Node list unchanged: nothing matched, nothing rewritten.
    assert [node.name for node in model_out.graph.node] == nodes_in
298,507 | test queue | import queue
import sched
import threading
import time
import unittest
from test import support
TIMEOUT = 10
class Timer:
    """A fake monotonic clock driven explicitly by the test thread.

    ``sleep`` blocks until ``advance`` has raised the time limit far enough,
    which lets the tests step a scheduler running in another thread.
    """

    def __init__(self):
        self._cond = threading.Condition()
        self._now = 0       # current fake time
        self._limit = 0     # time limit user code may run up to

    def time(self):
        """Return the current fake time."""
        with self._cond:
            return self._now

    def sleep(self, t):
        """Advance the clock by *t*, blocking while past the current limit."""
        assert t >= 0
        with self._cond:
            deadline = self._now + t
            while self._limit < deadline:
                self._now = self._limit
                self._cond.wait()
            self._now = deadline

    def advance(self, t):
        """Raise the time limit by *t* and wake any blocked sleepers."""
        assert t >= 0
        with self._cond:
            self._limit += t
            self._cond.notify_all()
class TestCase(unittest.TestCase):
    """Tests for the sched.scheduler event scheduler."""

    def test_enter(self):
        # Events entered with relative delays fire in time order,
        # not insertion order.
        l = []
        fun = lambda x: l.append(x)
        scheduler = sched.scheduler(time.time, time.sleep)
        for x in [0.5, 0.4, 0.3, 0.2, 0.1]:
            z = scheduler.enter(x, 1, fun, (x,))
        scheduler.run()
        self.assertEqual(l, [0.1, 0.2, 0.3, 0.4, 0.5])

    def test_enterabs(self):
        # Same as test_enter but with absolute timestamps.
        l = []
        fun = lambda x: l.append(x)
        scheduler = sched.scheduler(time.time, time.sleep)
        for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
            z = scheduler.enterabs(x, 1, fun, (x,))
        scheduler.run()
        self.assertEqual(l, [0.01, 0.02, 0.03, 0.04, 0.05])

    def test_enter_concurrent(self):
        # Drive a scheduler running in a background thread with the fake
        # Timer clock; events may be entered while run() is blocked.
        q = queue.Queue()
        fun = q.put
        timer = Timer()
        scheduler = sched.scheduler(timer.time, timer.sleep)
        scheduler.enter(1, 1, fun, (1,))
        scheduler.enter(3, 1, fun, (3,))
        t = threading.Thread(target=scheduler.run)
        t.start()
        timer.advance(1)
        self.assertEqual(q.get(timeout=TIMEOUT), 1)
        self.assertTrue(q.empty())
        # Add more events while the scheduler thread is sleeping.
        for x in [4, 5, 2]:
            z = scheduler.enter(x - 1, 1, fun, (x,))
        timer.advance(2)
        self.assertEqual(q.get(timeout=TIMEOUT), 2)
        self.assertEqual(q.get(timeout=TIMEOUT), 3)
        self.assertTrue(q.empty())
        timer.advance(1)
        self.assertEqual(q.get(timeout=TIMEOUT), 4)
        self.assertTrue(q.empty())
        timer.advance(1)
        self.assertEqual(q.get(timeout=TIMEOUT), 5)
        self.assertTrue(q.empty())
        timer.advance(1000)
        support.join_thread(t, timeout=TIMEOUT)
        self.assertTrue(q.empty())
        self.assertEqual(timer.time(), 5)

    def test_priority(self):
        # Events scheduled for the same time run in priority order (1 first).
        l = []
        fun = lambda x: l.append(x)
        scheduler = sched.scheduler(time.time, time.sleep)
        for priority in [1, 2, 3, 4, 5]:
            z = scheduler.enterabs(0.01, priority, fun, (priority,))
        scheduler.run()
        self.assertEqual(l, [1, 2, 3, 4, 5])

    def test_cancel(self):
        # Cancelled events (first and last here) must not fire.
        l = []
        fun = lambda x: l.append(x)
        scheduler = sched.scheduler(time.time, time.sleep)
        now = time.time()
        event1 = scheduler.enterabs(now + 0.01, 1, fun, (0.01,))
        event2 = scheduler.enterabs(now + 0.02, 1, fun, (0.02,))
        event3 = scheduler.enterabs(now + 0.03, 1, fun, (0.03,))
        event4 = scheduler.enterabs(now + 0.04, 1, fun, (0.04,))
        event5 = scheduler.enterabs(now + 0.05, 1, fun, (0.05,))
        scheduler.cancel(event1)
        scheduler.cancel(event5)
        scheduler.run()
        self.assertEqual(l, [0.02, 0.03, 0.04])

    def test_cancel_concurrent(self):
        # Cancel events while the scheduler runs in another thread.
        q = queue.Queue()
        fun = q.put
        timer = Timer()
        scheduler = sched.scheduler(timer.time, timer.sleep)
        now = timer.time()
        event1 = scheduler.enterabs(now + 1, 1, fun, (1,))
        event2 = scheduler.enterabs(now + 2, 1, fun, (2,))
        event4 = scheduler.enterabs(now + 4, 1, fun, (4,))
        event5 = scheduler.enterabs(now + 5, 1, fun, (5,))
        event3 = scheduler.enterabs(now + 3, 1, fun, (3,))
        t = threading.Thread(target=scheduler.run)
        t.start()
        timer.advance(1)
        self.assertEqual(q.get(timeout=TIMEOUT), 1)
        self.assertTrue(q.empty())
        scheduler.cancel(event2)
        scheduler.cancel(event5)
        timer.advance(1)
        # Event 2 was cancelled, so nothing fires at t=2.
        self.assertTrue(q.empty())
        timer.advance(1)
        self.assertEqual(q.get(timeout=TIMEOUT), 3)
        self.assertTrue(q.empty())
        timer.advance(1)
        self.assertEqual(q.get(timeout=TIMEOUT), 4)
        self.assertTrue(q.empty())
        timer.advance(1000)
        support.join_thread(t, timeout=TIMEOUT)
        self.assertTrue(q.empty())
        self.assertEqual(timer.time(), 4)

    def test_empty(self):
        l = []
        fun = lambda x: l.append(x)
        scheduler = sched.scheduler(time.time, time.sleep)
        self.assertTrue(scheduler.empty())
        for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
            z = scheduler.enterabs(x, 1, fun, (x,))
        self.assertFalse(scheduler.empty())
        scheduler.run()
        self.assertTrue(scheduler.empty())

    def METHOD_NAME(self):
        # The ``queue`` property must list upcoming events in time order,
        # regardless of insertion order.
        l = []
        fun = lambda x: l.append(x)
        scheduler = sched.scheduler(time.time, time.sleep)
        now = time.time()
        e5 = scheduler.enterabs(now + 0.05, 1, fun)
        e1 = scheduler.enterabs(now + 0.01, 1, fun)
        e2 = scheduler.enterabs(now + 0.02, 1, fun)
        e4 = scheduler.enterabs(now + 0.04, 1, fun)
        e3 = scheduler.enterabs(now + 0.03, 1, fun)
        # queue property is supposed to return an order list of
        # upcoming events
        self.assertEqual(scheduler.queue, [e1, e2, e3, e4, e5])

    def test_args_kwargs(self):
        # Positional args via ``argument=``, keyword args via ``kwargs=``.
        seq = []
        def fun(*a, **b):
            seq.append((a, b))
        now = time.time()
        scheduler = sched.scheduler(time.time, time.sleep)
        scheduler.enterabs(now, 1, fun)
        scheduler.enterabs(now, 1, fun, argument=(1, 2))
        scheduler.enterabs(now, 1, fun, argument=('a', 'b'))
        scheduler.enterabs(now, 1, fun, argument=(1, 2), kwargs={"foo": 3})
        scheduler.run()
        self.assertCountEqual(seq, [
            ((), {}),
            ((1, 2), {}),
            (('a', 'b'), {}),
            ((1, 2), {'foo': 3})
        ])

    def test_run_non_blocking(self):
        # With blocking=False run() returns immediately when the next event
        # is still in the future; nothing fires.
        l = []
        fun = lambda x: l.append(x)
        scheduler = sched.scheduler(time.time, time.sleep)
        for x in [10, 9, 8, 7, 6]:
            scheduler.enter(x, 1, fun, (x,))
        scheduler.run(blocking=False)
        self.assertEqual(l, [])
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
298,508 | get io sources | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
import io
import gzip
import bz2
import tempfile
import itertools
import requests
from skbio.io import IOSourceError
from ._fileobject import (IterableStringWriterIO, IterableStringReaderIO,
WrappedBufferedRandom)
# NamedTemporaryFile isn't an actual file class, it is a function which
# returns _TemporaryFileWrapper around a normal file object. Instead of
# relying on this implementation, we take whatever the class of the result of
# NamedTemporaryFile is.
with tempfile.NamedTemporaryFile() as fh:
    # Captured once at import time; used by WrappedTemporaryFileSource below.
    _WrappedTemporaryFile = type(fh)
def METHOD_NAME():
    """Return the IOSource classes, in short-circuit evaluation order."""
    # Ordering is significant: the first source that accepts the input wins,
    # so more specific sources must precede more general ones.
    sources = (
        HTTPSource,
        FilePathSource,
        BytesIOSource,
        BufferedIOSource,
        TextIOSource,
        WrappedTemporaryFileSource,
        IterableSource,
    )
    return sources
def _compressors():
    """Return the available Compressor classes."""
    return (GzipCompressor, BZ2Compressor)
def get_compression_handler(name):
    """Look up a compressor by name ('auto' sniffs); False when unknown."""
    table = dict((c.name, c) for c in _compressors())
    table['auto'] = AutoCompressor
    if name in table:
        return table[name]
    return False
class IOSource:
    """Base class adapting a user-supplied ``file`` argument into a fileobj.

    Subclasses override ``can_read``/``can_write`` to claim an input and
    ``get_reader``/``get_writer`` to produce the actual stream.
    """
    # Whether the returned stream may be closed by the framework; sources
    # that wrap caller-owned objects set this to False.
    closeable = True

    def __init__(self, file, options):
        self.file = file
        self.options = options

    def can_read(self):
        """Whether this source can produce a reader for ``self.file``."""
        return False

    def can_write(self):
        """Whether this source can produce a writer for ``self.file``."""
        return False

    def get_reader(self):
        raise NotImplementedError()

    def get_writer(self):
        raise NotImplementedError()
class Compressor(IOSource):
    """Base class for compression layers stacked on top of an IOSource."""
    # Whether the compressed format can be written as a stream.
    streamable = True
    # Registry key used by get_compression_handler().
    name = ''

    def can_write(self):
        # Any compressor can always write its own format.
        return True
class FilePathSource(IOSource):
    """Source for plain filesystem paths given as strings."""

    def can_read(self):
        return isinstance(self.file, str)

    def can_write(self):
        # Any path string acceptable for reading is acceptable as a target.
        return self.can_read()

    def get_reader(self):
        return io.open(self.file, mode='rb')

    def get_writer(self):
        return io.open(self.file, mode='wb')
class HTTPSource(IOSource):
    """Read-only source for http(s) URLs; downloads the body into memory."""

    def can_read(self):
        return (
            isinstance(self.file, str) and
            requests.compat.urlparse(self.file).scheme in {'http', 'https'})

    def get_reader(self):
        req = requests.get(self.file)
        # if the response is not 200, an exception will be raised
        req.raise_for_status()
        # Entire payload is buffered in memory before reading begins.
        return io.BufferedReader(io.BytesIO(req.content))
class BytesIOSource(IOSource):
    """Source for in-memory BytesIO objects; the caller keeps ownership."""
    closeable = False

    def can_read(self):
        return isinstance(self.file, io.BytesIO)

    def can_write(self):
        return self.can_read()

    def get_reader(self):
        # Wrapped so the object presents the buffered-file API (presumably
        # including peek(), which this module relies on -- see _fileobject).
        return WrappedBufferedRandom(self.file)

    def get_writer(self):
        return self.get_reader()
class BufferedIOSource(IOSource):
    """Source for already-buffered binary file objects."""
    closeable = False

    def can_read(self):
        # `peek` is part of the API we want to guarantee, so we can't just look
        # for io.BufferedIOBase. Despite the fact that the C implementation of
        # io.BufferedRandom inherits io.BufferedReader/Writer it is not
        # reflected in an isinstance check, so we need to check for it manually
        return isinstance(self.file, (io.BufferedReader, io.BufferedRandom))

    def can_write(self):
        return isinstance(self.file, (io.BufferedWriter, io.BufferedRandom))

    def get_reader(self):
        # Already usable as-is.
        return self.file

    def get_writer(self):
        return self.file
class TextIOSource(IOSource):
    """Source for already-open text-mode file objects."""
    closeable = False

    def can_read(self):
        return isinstance(self.file, io.TextIOBase) and self.file.readable()

    def can_write(self):
        return isinstance(self.file, io.TextIOBase) and self.file.writable()

    def get_reader(self):
        # Already a usable text stream; hand it back untouched.
        return self.file

    def get_writer(self):
        return self.file
class WrappedTemporaryFileSource(IOSource):
    """Source for tempfile.NamedTemporaryFile results (wrapper objects)."""
    closeable = False

    def can_read(self):
        return (isinstance(self.file, _WrappedTemporaryFile) and
                self.file.readable())

    def can_write(self):
        return (isinstance(self.file, _WrappedTemporaryFile) and
                self.file.writable())

    def get_reader(self):
        # _TemporaryFileWrapper has a file attribute which is an actual fileobj
        return self.file.file

    def get_writer(self):
        return self.file.file
class IterableSource(IOSource):
    """Source for iterables of strings (e.g. a list of lines).

    Reading consumes the first element to sniff the type; the consumed head
    is chained back so no data is lost.  NOTE(review): an iterable whose
    first element is ``None`` is indistinguishable from an empty iterable
    here and is treated as empty -- confirm this is intended.
    """

    def can_read(self):
        if hasattr(self.file, '__iter__'):
            iterator = iter(self.file)
            head = next(iterator, None)
            if head is None:
                # Empty (or None-headed) input: read as an empty source.
                self.repaired = []
                return True
            if isinstance(head, str):
                # Re-attach the consumed head in front of the iterator.
                self.repaired = itertools.chain([head], iterator)
                return True
            else:
                # We may have mangled a generator at this point, so just abort
                raise IOSourceError(
                    "Could not open source: %r (mode: %r)" %
                    (self.file, self.options['mode']))
        return False

    def can_write(self):
        # Anything list-like (appendable and iterable) can receive lines.
        return hasattr(self.file, 'append') and hasattr(self.file, '__iter__')

    def get_reader(self):
        return IterableStringReaderIO(self.repaired,
                                      newline=self.options['newline'])

    def get_writer(self):
        return IterableStringWriterIO(self.file,
                                      newline=self.options['newline'])
class GzipCompressor(Compressor):
    """Gzip (de)compression, detected by the two-byte magic number."""
    name = 'gzip'
    streamable = True

    def can_read(self):
        # Gzip magic number 0x1f 0x8b; requires a peek()-able file.
        return self.file.peek(2)[:2] == b'\x1f\x8b'

    def get_reader(self):
        return gzip.GzipFile(fileobj=self.file)

    def get_writer(self):
        return gzip.GzipFile(fileobj=self.file, mode='wb',
                             compresslevel=self.options['compresslevel'])
class BZ2Compressor(Compressor):
    """Bzip2 (de)compression, detected by the 'BZh' magic prefix."""
    name = 'bz2'
    # Bzip2 output cannot be produced as an open-ended stream here.
    streamable = False

    def can_read(self):
        return self.file.peek(3)[:3] == b'BZh'

    def get_reader(self):
        return bz2.BZ2File(self.file, mode='rb')

    def get_writer(self):
        return bz2.BZ2File(self.file, mode='wb',
                           compresslevel=self.options['compresslevel'])
class AutoCompressor(Compressor):
    """Sniffs the stream and delegates to the first matching compressor.

    Falls back to passing the raw file through unchanged when no known
    magic number matches, and always passes through for writing.
    """
    streamable = True  # We can't write compressed output here, so it doesn't matter
    name = 'auto'

    def get_reader(self):
        for compression_handler in _compressors():
            compressor = compression_handler(self.file, self.options)
            if compressor.can_read():
                return compressor.get_reader()
        # No known magic number: treat the input as uncompressed.
        return self.file

    def get_writer(self):
        return self.file
298,509 | term handler | import json, signal
from functools import wraps
from flask import request, abort, redirect, flash, url_for, render_template, make_response
from flask_login import login_user, current_user
from portality.api.common import Api401Error
from portality.core import app
from portality.lib import dates
from portality.models import Account
from portality.models.harvester import HarvesterProgressReport as Report
def swag(swag_summary, swag_spec):
    """
    ~~Swagger:Feature~~
    Decorator for API functions, adding swagger info to the swagger spec.
    """
    def attach(func):
        # Annotate the endpoint in place; the swagger generator reads
        # these attributes off the view function later.
        func.summary = swag_summary
        func.description = swag_summary
        func.swag = swag_spec
        return func
    return attach
def api_key_required(fn):
    """
    ~~APIKey:Feature~~
    Decorator for API functions, requiring a valid key to find a user.

    Raises Api401Error when no key is supplied, the key matches no account,
    or the matched account cannot be logged in.
    """
    @wraps(fn)
    def decorated_view(*args, **kwargs):
        api_key = request.values.get("api_key", None)
        if api_key is not None:
            user = Account.pull_by_api_key(api_key)
            if user is not None:
                if login_user(user, remember=False):
                    return fn(*args, **kwargs)
        # else
        raise Api401Error("An API Key is required to access this.")
    return decorated_view
def api_key_optional(fn):
    """
    ~~APIKey:Feature~~
    Decorator for API functions, requiring a valid key to find a user if a key is provided. OK if none provided.

    A supplied-but-invalid key aborts with 401; no key at all is accepted.
    """
    @wraps(fn)
    def decorated_view(*args, **kwargs):
        api_key = request.values.get("api_key", None)
        if api_key:
            user = Account.pull_by_api_key(api_key)
            if user is not None:
                if login_user(user, remember=False):
                    return fn(*args, **kwargs)
            # else: a key was supplied but did not authenticate anyone
            abort(401)
        # no api key, which is ok
        return fn(*args, **kwargs)
    return decorated_view
def ssl_required(fn):
    """
    ~~SSLRequired:Feature~~
    Decorator for when a view f() should be served only over SSL.
    """
    @wraps(fn)
    def decorated_view(*args, **kwargs):
        if app.config.get("SSL"):
            if request.is_secure:
                return fn(*args, **kwargs)
            else:
                # Force-redirect plain HTTP requests to their HTTPS twin.
                return redirect(request.url.replace("http://", "https://"))
        # SSL not enforced by config: serve as-is.
        return fn(*args, **kwargs)
    return decorated_view
def restrict_to_role(role):
    """
    ~~Authorisation:Feature~~
    Redirect anonymous users to login, and unauthorised users to the home page.

    NOTE(review): this is called inline (not used as a decorator) and returns
    a redirect response or None -- callers must return its result if truthy.

    :param role: role name the current user must hold
    :return: a redirect response, or None when access is allowed
    """
    if current_user.is_anonymous:
        flash('You are trying to access a protected area. Please log in first.', 'error')
        return redirect(url_for('account.login', next=request.url))

    if not current_user.has_role(role):
        flash('You do not have permission to access this area of the site.', 'error')
        return redirect(url_for('doaj.home'))
def write_required(script=False, api=False):
    """
    ~~ReadOnlyMode:Feature~~
    Decorator factory blocking the wrapped callable while READ_ONLY_MODE is on.

    :param script: True when decorating a CLI/script entry point (raises instead of rendering)
    :param api: True when decorating an API route (returns a JSON 503)
    :return: the decorator
    """
    def decorator(fn):
        @wraps(fn)
        def decorated_view(*args, **kwargs):
            if app.config.get("READ_ONLY_MODE", False):
                # TODO remove "script" argument from decorator.
                # Should be possible to detect if this is run in a web context or not.
                if script:
                    raise RuntimeError('This task cannot run since the system is in read-only mode.')
                elif api:
                    resp = make_response(json.dumps({"message" : "We are currently carrying out essential maintenance, and this route is temporarily unavailable"}), 503)
                    resp.mimetype = "application/json"
                    return resp
                else:
                    return render_template("doaj/readonly.html")
            return fn(*args, **kwargs)
        return decorated_view
    return decorator
class CaughtTermException(Exception):
    """Raised by the SIGTERM handler to trigger a graceful harvester exit."""
    pass
def METHOD_NAME(signum, frame):
    """Signal handler: log the received signal and raise CaughtTermException.

    Registered for SIGTERM so that a TERM signal unwinds the harvester via
    a normal Python exception instead of killing the process outright.
    """
    app.logger.warning("Harvester terminated with signal " + str(signum))
    raise CaughtTermException
def capture_sigterm(fn):
    """
    ~~CaptureSigterm:Feature~~
    Decorator which allows graceful exit on SIGTERM.

    On SIGTERM (or Ctrl-C) the wrapped call is unwound, a progress report is
    written, optionally emailed, logged, and the process exits with status 1.
    """
    # Register the SIGTERM handler to raise an exception, allowing graceful exit.
    signal.signal(signal.SIGTERM, METHOD_NAME)

    @wraps(fn)
    def decorated_fn(*args, **kwargs):
        try:
            # Propagate the wrapped function's return value (previously
            # discarded) so the decorator is transparent to callers.
            return fn(*args, **kwargs)
        except (CaughtTermException, KeyboardInterrupt):
            app.logger.warning(u"Harvester caught SIGTERM. Exiting.")
            report = Report.write_report()
            if app.config.get("HARVESTER_EMAIL_ON_EVENT", False):
                to = app.config.get("HARVESTER_EMAIL_RECIPIENTS", None)
                fro = app.config.get("SYSTEM_EMAIL_FROM")
                if to is not None:
                    from portality import app_email as mail
                    mail.send_mail(
                        to=to,
                        fro=fro,
                        subject="DOAJ Harvester caught SIGTERM at {0}".format(dates.now_str()),
                        msg_body=report
                    )
            app.logger.info(report)
            exit(1)

    # BUG FIX: previously returned the undefined name ``decorated_f``,
    # raising NameError as soon as the decorator was applied.
    return decorated_fn
298,510 | ttl | import base64
import json
import cloudinary
from cloudinary.api_client.call_api import call_json_api
from cloudinary.utils import unique, unsigned_download_url_prefix, build_distribution_domain, base64url_encode, \
json_encode, compute_hex_hash, SIGNATURE_SHA256
class Search(object):
    """Build and execute a search query against the Cloudinary search API."""

    ASSETS = 'resources'

    _endpoint = ASSETS

    # Query keys whose list values must be de-duplicated before sending;
    # the optional callable selects the de-duplication key for each entry.
    _KEYS_WITH_UNIQUE_VALUES = {
        'sort_by': lambda x: next(iter(x)),
        'aggregate': None,
        'with_field': None,
    }

    _ttl = 300  # Used for search URLs

    def __init__(self):
        self.query = {}

    def expression(self, value):
        """Specify the search query expression."""
        self.query["expression"] = value
        return self

    def max_results(self, value):
        """Set the max results to return"""
        self.query["max_results"] = value
        return self

    def next_cursor(self, value):
        """Get next page in the query using the ``next_cursor`` value from a previous invocation."""
        self.query["next_cursor"] = value
        return self

    def sort_by(self, field_name, direction=None):
        """Add a field to sort results by. If not provided, direction is ``desc``."""
        if direction is None:
            direction = 'desc'
        self._add("sort_by", {field_name: direction})
        return self

    def aggregate(self, value):
        """Aggregate field."""
        self._add("aggregate", value)
        return self

    def with_field(self, value):
        """Request an additional field in the result set."""
        self._add("with_field", value)
        return self

    def METHOD_NAME(self, METHOD_NAME):
        """
        Sets the time to live of the search URL.

        :param ttl: The time to live in seconds.
        :return: self
        """
        self._ttl = METHOD_NAME
        return self

    def to_json(self):
        """Serialize the query to a JSON string."""
        return json.dumps(self.as_dict())

    def execute(self, **options):
        """Execute the search and return results."""
        options["content_type"] = 'application/json'
        uri = [self._endpoint, 'search']
        return call_json_api('post', uri, self.as_dict(), **options)

    def as_dict(self):
        """Return the query as a plain dict, de-duplicating multi-value keys."""
        to_return = {}
        for key, value in self.query.items():
            if key in self._KEYS_WITH_UNIQUE_VALUES:
                value = unique(value, self._KEYS_WITH_UNIQUE_VALUES[key])
            to_return[key] = value
        return to_return

    def to_url(self, METHOD_NAME=None, next_cursor=None, **options):
        """
        Creates a signed Search URL that can be used on the client side.

        :param ttl: The time to live in seconds.
        :param next_cursor: Starting position.
        :param options: Additional url delivery options.
        :return: The resulting search URL.
        """
        api_secret = options.get("api_secret", cloudinary.config().api_secret or None)
        if not api_secret:
            raise ValueError("Must supply api_secret")

        if METHOD_NAME is None:
            METHOD_NAME = self._ttl
        query = self.as_dict()
        # The cursor is carried in the URL path, not in the signed query.
        _next_cursor = query.pop("next_cursor", None)
        if next_cursor is None:
            next_cursor = _next_cursor

        b64query = base64url_encode(json_encode(query, sort_keys=True))
        prefix = build_distribution_domain(options)
        # BUG FIX: both format strings reference the placeholder {ttl}, so
        # the keyword argument must be named ``ttl`` -- passing it under any
        # other name raises KeyError at runtime.
        signature = compute_hex_hash("{ttl}{b64query}{api_secret}".format(
            ttl=METHOD_NAME,
            b64query=b64query,
            api_secret=api_secret
        ), algorithm=SIGNATURE_SHA256)
        return "{prefix}/search/{signature}/{ttl}/{b64query}{next_cursor}".format(
            prefix=prefix,
            signature=signature,
            ttl=METHOD_NAME,
            b64query=b64query,
            next_cursor="/{}".format(next_cursor) if next_cursor else "")

    def endpoint(self, endpoint):
        """Override the API endpoint ('resources' by default)."""
        self._endpoint = endpoint
        return self

    def _add(self, name, value):
        # Accumulate repeatable query parameters as lists.
        if name not in self.query:
            self.query[name] = []
        self.query[name].append(value)
        return self
298,511 | new | """HMAC (Keyed-Hashing for Message Authentication) module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
from _operator import _compare_digest as compare_digest
try:
import _hashlib as _hashopenssl
except ImportError:
_hashopenssl = None
_openssl_md_meths = None
else:
_openssl_md_meths = frozenset(_hashopenssl.openssl_md_meth_names)
import hashlib as _hashlib
# Translation tables XOR-ing every byte value with the HMAC outer (0x5C)
# and inner (0x36) pad constants from RFC 2104.
trans_5C = bytes((x ^ 0x5C) for x in range(256))
trans_36 = bytes((x ^ 0x36) for x in range(256))

# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
class HMAC:
    """RFC 2104 HMAC class. Also complies with RFC 4231.

    This supports the API for Cryptographic Hash Functions (PEP 247).
    """
    blocksize = 64  # 512-bit HMAC; can be changed in subclasses.

    def __init__(self, key, msg=None, digestmod=''):
        """Create a new HMAC object.

        key: bytes or buffer, key for the keyed hash object.
        msg: bytes or buffer, Initial input for the hash or None.
        digestmod: A hash name suitable for hashlib.new(). *OR*
                   A hashlib constructor returning a new hash object. *OR*
                   A module supporting PEP 247.

                   Required as of 3.8, despite its position after the optional
                   msg argument. Passing it as a keyword argument is
                   recommended, though not required for legacy API reasons.
        """
        if not isinstance(key, (bytes, bytearray)):
            raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)

        if not digestmod:
            raise TypeError("Missing required parameter 'digestmod'.")

        # Normalise digestmod into a zero/one-argument hash constructor.
        if callable(digestmod):
            self.digest_cons = digestmod
        elif isinstance(digestmod, str):
            self.digest_cons = lambda d=b'': _hashlib.METHOD_NAME(digestmod, d)
        else:
            self.digest_cons = lambda d=b'': digestmod.METHOD_NAME(d)

        self.outer = self.digest_cons()
        self.inner = self.digest_cons()
        self.digest_size = self.inner.digest_size

        if hasattr(self.inner, 'block_size'):
            blocksize = self.inner.block_size
            if blocksize < 16:
                _warnings.warn('block_size of %d seems too small; using our '
                               'default of %d.' % (blocksize, self.blocksize),
                               RuntimeWarning, 2)
                blocksize = self.blocksize
        else:
            _warnings.warn('No block_size attribute on given digest object; '
                           'Assuming %d.' % (self.blocksize),
                           RuntimeWarning, 2)
            blocksize = self.blocksize

        # self.blocksize is the default blocksize. self.block_size is
        # effective block size as well as the public API attribute.
        self.block_size = blocksize

        # Per RFC 2104: hash down over-long keys, then zero-pad to exactly
        # one block before applying the inner/outer pads.
        if len(key) > blocksize:
            key = self.digest_cons(key).digest()

        key = key.ljust(blocksize, b'\0')
        self.outer.update(key.translate(trans_5C))
        self.inner.update(key.translate(trans_36))
        if msg is not None:
            self.update(msg)

    @property
    def name(self):
        # e.g. "hmac-sha256".
        return "hmac-" + self.inner.name

    def update(self, msg):
        """Feed data from msg into this hashing object."""
        self.inner.update(msg)

    def copy(self):
        """Return a separate copy of this hashing object.

        An update to this copy won't affect the original object.
        """
        # Call __new__ directly to avoid the expensive __init__.
        other = self.__class__.__new__(self.__class__)
        other.digest_cons = self.digest_cons
        other.digest_size = self.digest_size
        other.inner = self.inner.copy()
        other.outer = self.outer.copy()
        return other

    def _current(self):
        """Return a hash object for the current state.

        To be used only internally with digest() and hexdigest().
        """
        h = self.outer.copy()
        h.update(self.inner.digest())
        return h

    def digest(self):
        """Return the hash value of this hashing object.

        This returns the hmac value as bytes. The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        h = self._current()
        return h.digest()

    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        h = self._current()
        return h.hexdigest()
def METHOD_NAME(key, msg=None, digestmod=''):
    """Create a new hashing object and return it.

    key: bytes or buffer, The starting key for the hash.
    msg: bytes or buffer, Initial input for the hash, or None.
    digestmod: A hash name suitable for hashlib.new(). *OR*
               A hashlib constructor returning a new hash object. *OR*
               A module supporting PEP 247.

               Required as of 3.8, despite its position after the optional
               msg argument. Passing it as a keyword argument is
               recommended, though not required for legacy API reasons.

    You can now feed arbitrary bytes into the object using its update()
    method, and can ask for the hash value at any time by calling its digest()
    or hexdigest() methods.
    """
    return HMAC(key, msg, digestmod)
def digest(key, msg, digest):
    """Fast inline implementation of HMAC.

    key: bytes or buffer, The key for the keyed hash object.
    msg: bytes or buffer, Input message.
    digest: A hash name suitable for hashlib.new() for best performance. *OR*
            A hashlib constructor returning a new hash object. *OR*
            A module supporting PEP 247.
    """
    # Fast path: let OpenSSL compute the whole HMAC in C when it recognises
    # the digest name.
    if (_hashopenssl is not None and
            isinstance(digest, str) and digest in _openssl_md_meths):
        return _hashopenssl.hmac_digest(key, msg, digest)

    # Resolve the digest argument into a constructor (callable, name, module).
    if callable(digest):
        digest_cons = digest
    elif isinstance(digest, str):
        digest_cons = lambda d=b'': _hashlib.METHOD_NAME(digest, d)
    else:
        digest_cons = lambda d=b'': digest.METHOD_NAME(d)

    inner = digest_cons()
    outer = digest_cons()
    blocksize = getattr(inner, 'block_size', 64)
    # Per RFC 2104: hash down over-long keys, then zero-pad to one block.
    if len(key) > blocksize:
        key = digest_cons(key).digest()
    key = key + b'\x00' * (blocksize - len(key))
    inner.update(key.translate(trans_36))
    outer.update(key.translate(trans_5C))
    inner.update(msg)
    outer.update(inner.digest())
    return outer.digest()
298,512 | shortcuts | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
The model containing the snippets data.
"""
import bisect
from PyQt5.QtCore import QAbstractItemModel, QModelIndex, Qt
from PyQt5.QtGui import QKeySequence
import app
import actioncollection
from . import snippets
def model():
    """Returns the global model containing snippets."""
    m = SnippetModel(app.qApp)
    # Self-replacing memoization: rebind the module-level name ``model`` to
    # a lambda returning this instance, so the model is constructed exactly
    # once and subsequent calls are cheap.
    global model
    model = lambda: m
    return m
class SnippetModel(QAbstractItemModel):
    """Presents the snippets as a Qt Model.

    A flat three-column model: variable name, title/description, shortcut.
    """
    def __init__(self, parent = None):
        super().__init__(parent)
        self._names = []   # snippet ids, kept sorted by snippets.title
        self.load()
        # Reload when settings (e.g. shortcuts) change; retranslate headers
        # when the UI language changes.
        app.settingsChanged.connect(self.slotSettingsChanged)
        app.languageChanged.connect(self.slotLanguageChanged)

    # methods needed to be a well-behaved model
    def headerData(self, section, orientation, role=Qt.DisplayRole):
        if role == Qt.DisplayRole and orientation == Qt.Horizontal:
            if section == 0:
                return _("Name")
            elif section == 1:
                return _("Description")
            else:
                return _("Shortcut")

    def index(self, row, column, parent=None):
        # Flat model: indexes never need an internal pointer.
        return self.createIndex(row, column)

    def parent(self, index):
        # Flat model: every item is top-level.
        return QModelIndex()

    def columnCount(self, parent=QModelIndex()):
        return 3 if not parent.isValid() else 0

    def rowCount(self, parent=QModelIndex()):
        return len(self._names) if not parent.isValid() else 0

    def data(self, index, role=Qt.DisplayRole):
        name = self.name(index)
        if role == Qt.DisplayRole:
            if index.column() == 0:
                return snippets.get(name).variables.get('name')
            elif index.column() == 1:
                return snippets.title(name)
            else:
                return shortcut(name)
        elif role == Qt.DecorationRole and index.column() == 1:
            return snippets.icon(name)

    # slots
    def slotSettingsChanged(self):
        """Called when settings change, e.g. when keyboard shortcuts are altered."""
        self.load()

    def slotLanguageChanged(self):
        """Called when the user changes the language."""
        self.headerDataChanged.emit(Qt.Horizontal, 0, 2)

    def load(self):
        # Full model reset: re-read all snippet names, sorted by title.
        self.beginResetModel()
        self._names = sorted(snippets.names(), key=snippets.title)
        self.endResetModel()

    # interface for getting/altering snippets
    def names(self):
        """Returns the internal list of snippet names in title order. Do not alter!"""
        return self._names

    def name(self, index):
        """The internal snippet id for the given QModelIndex."""
        return self._names[index.row()]

    def removeRows(self, row, count, parent=QModelIndex()):
        end = row + count
        # NOTE(review): Qt's beginRemoveRows expects the LAST row inclusive;
        # ``end`` here is exclusive (row+count) -- verify this off-by-one.
        self.beginRemoveRows(parent, row, end)
        try:
            for name in self._names[row:end]:
                snippets.delete(name)
            del self._names[row:end]
        finally:
            self.endRemoveRows()
        return True

    def saveSnippet(self, name, text, title):
        """Store a snippet.

        If name is None or does not exist in names(), a new snippet is created.
        Returns the QModelIndex the snippet was stored at.

        Title may be None.
        """
        # first, get the old titles list
        titles = list(snippets.title(n) for n in self._names)
        oldrow = None
        if name is None:
            name = snippets.name(self._names)
        else:
            try:
                oldrow = self._names.index(name)
            except ValueError:
                pass
        snippets.save(name, text, title)
        # sort the new snippet in
        # if oldrow is not None, it is the row to be removed.
        title = snippets.title(name)
        i = bisect.bisect_right(titles, title)
        if oldrow is None:
            # just insert new snippet
            self.beginInsertRows(QModelIndex(), i, i)
            self._names.insert(i, name)
            self.endInsertRows()
            return self.createIndex(i, 0)
        elif i in (oldrow, oldrow+1):
            # just replace
            self._names[oldrow] = name
            self.dataChanged.emit(self.createIndex(oldrow, 0), self.createIndex(oldrow, 2))
            return self.createIndex(oldrow, 0)
        else:
            # move the old row to the new place
            if self.beginMoveRows(QModelIndex(), oldrow, oldrow, QModelIndex(), i):
                del self._names[oldrow]
                # Adjust the insertion point for the row just removed.
                if i > oldrow:
                    i -= 1
                self._names.insert(i, name)
                self.endMoveRows()
                self.dataChanged.emit(self.createIndex(i, 0), self.createIndex(i, 2))
                return self.createIndex(i, 0)
            raise RuntimeError("wrong row move offset")
def shortcut(name):
    """Returns a shortcut text for the named snippets, if any, else None."""
    s = METHOD_NAME(name)
    if s:
        # Display the first sequence; "..." signals more shortcuts exist.
        text = s[0].toString(QKeySequence.NativeText)
        if len(s) > 1:
            text += "..."
        return text
def METHOD_NAME(name):
    """Returns a (maybe empty) list of QKeySequences for the named snippet."""
    ac = collection()
    # Falls back to [] when there is no collection or it yields nothing.
    return ac and ac.METHOD_NAME(name) or []
def collection():
    """Return the ShortcutCollection instance named 'snippets', if it exists, else None."""
    # HACK alert :-) reach into the registry of ShortcutCollection instances.
    refs = actioncollection.ShortcutCollection.others.get('snippets')
    if not refs:
        return None
    return refs[0]()
|
298,513 | check values | #!/usr/bin/env python
# Copyright 2023, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Main program for yamllint checker tool.
"""
import os
import sys
# Unchanged, running from checkout, use the parent directory, the nuitka
# package ought be there.
sys.path.insert(
0, os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
)
# isort:start
from optparse import OptionParser
from nuitka.tools.Basics import goHome
from nuitka.tools.quality.ScanSources import scanTargets
from nuitka.Tracing import my_print, tools_logger
from nuitka.utils.FileOperations import (
getFileContents,
openTextFile,
resolveShellPatternToFilenames,
)
from nuitka.utils.Yaml import (
PackageConfigYaml,
getYamlPackage,
getYamlPackageConfigurationSchemaFilename,
parseYaml,
)
def checkYamllint(document):
    """Run yamllint over *document* and abort the program unless it is clean."""
    import yamllint.cli  # pylint: disable=I0021,import-error

    my_print("Checking %r for proper yaml:" % document, style="blue")
    # yamllint.cli.run() always terminates via SystemExit; its exit code
    # tells us whether the document was lint clean.
    try:
        yamllint.cli.run([document])
    except SystemExit as e:
        if e.code != 0:
            sys.exit("Error, no lint clean yaml.")
    else:
        sys.exit("Error, yamllint didn't raise expected SystemExit exception.")
    my_print("OK, yamllint passed.", style="blue")
def checkSchema(document):
    """Validate *document* against the package configuration JSON schema."""
    import json  # pylint: disable=I0021,import-error

    from jsonschema import validators  # pylint: disable=I0021,import-error
    from jsonschema.exceptions import ValidationError

    yaml = getYamlPackage()
    schema_filename = getYamlPackageConfigurationSchemaFilename()
    with openTextFile(schema_filename, "r") as schema_file, openTextFile(
        document, "r"
    ) as yaml_file:
        validator = validators.Draft202012Validator(
            schema=json.loads(schema_file.read())
        )
        # BaseLoader keeps every scalar a plain string, matching the schema.
        instance = yaml.load(yaml_file, yaml.BaseLoader)
        try:
            validator.validate(instance=instance)
        except ValidationError:
            tools_logger.sysexit(
                "Error, please fix the schema errors in '%s' yaml file." % document
            )
        else:
            my_print("OK, schema validated", style="blue")
def _checkValues(filename, module_name, section, value):
if type(value) is dict:
for k, v in value.items():
if k == "description" and v != v.strip():
my_print(
"%s: %s config value of %s %s should not contain trailing or leading spaces"
% (filename, module_name, section, k)
)
_checkValues(filename, module_name, section, v)
elif type(value) in (list, tuple):
for item in value:
_checkValues(filename, module_name, section, item)
def METHOD_NAME(filename):
    """Check all values of one package configuration yaml file."""
    yaml_data = parseYaml(getFileContents(filename, mode="rb"))
    yaml = PackageConfigYaml(name=filename, data=yaml_data)
    for module_name, module_config in yaml.items():
        for section_name, section_config in module_config.items():
            _checkValues(filename, module_name, section_name, section_config)
def main():
    """Check the given (or default) yaml package configuration files.

    Each file is yamllint-checked, schema-validated and value-checked.
    """
    parser = OptionParser()
    _options, positional_args = parser.parse_args()
    if not positional_args:
        # NOTE(review): the default pattern resolves "*.yml" files, but
        # scanTargets below filters for the ".yaml" suffix — confirm these
        # two do not exclude each other.
        positional_args = ["nuitka/plugins/standard/*.yml"]
    my_print("Working on:", positional_args)
    # Expand shell glob patterns into concrete filenames, flattened.
    positional_args = sum(
        (
            resolveShellPatternToFilenames(positional_arg)
            for positional_arg in positional_args
        ),
        [],
    )
    # Run from the repository root so relative paths resolve.
    goHome()
    filenames = list(
        scanTargets(
            positional_args,
            suffixes=(".yaml",),
        )
    )
    if not filenames:
        sys.exit("No files found.")
    for filename in filenames:
        checkYamllint(filename)
        checkSchema(filename)
        METHOD_NAME(filename)
if __name__ == "__main__":
    main()
298,514 | merge | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
from urllib3.util import parse_url
from warehouse.config import Environment
SELF = "'self'"
NONE = "'none'"
def _serialize(policy):
return "; ".join(
[
" ".join([k] + [v2 for v2 in v if v2 is not None])
for k, v in sorted(policy.items())
]
)
def content_security_policy_tween_factory(handler, registry):
    """Pyramid tween factory adding the Content-Security-Policy response header.

    The policy comes from the registered "csp" service (empty when absent).
    """
    def content_security_policy_tween(request):
        resp = handler(request)
        try:
            policy = request.find_service(name="csp")
        except LookupError:
            # No csp service registered: fall back to an empty policy.
            policy = collections.defaultdict(list)
        # Replace CSP headers on /simple/ pages.
        if request.path.startswith("/simple/"):
            policy = collections.defaultdict(list)
            policy["sandbox"] = ["allow-top-navigation"]
            policy["default-src"] = [NONE]
        # We don't want to apply our Content Security Policy to the debug
        # toolbar, that's not part of our application and it doesn't work with
        # our restrictive CSP.
        # NOTE(review): str.format with request= suggests policy values may
        # contain "{request...}" placeholders — confirm against the settings.
        policy = _serialize(policy).format(request=request)
        if not request.path.startswith("/_debug_toolbar/") and policy:
            resp.headers["Content-Security-Policy"] = policy
        return resp
    return content_security_policy_tween
class CSPPolicy(collections.defaultdict):
    """A CSP policy: maps directive names to lists of source expressions."""

    def __init__(self, policy=None):
        super().__init__(list, policy or {})

    def METHOD_NAME(self, policy):
        """Fold another policy mapping into this one, directive by directive."""
        for directive, sources in policy.items():
            merged = self[directive]
            merged.extend(sources)
            # Per the CSP spec, 'none' is only meaningful as the sole source
            # expression of a directive; once other sources are present it
            # would be ignored, so drop it.
            if len(merged) > 1 and NONE in merged:
                merged.remove(NONE)
def csp_factory(_, request):
    """Build the request's CSPPolicy from the registry's "csp" setting, if any."""
    settings = request.registry.settings
    try:
        # Deep copy so per-request mutation never leaks into the registry.
        base_policy = copy.deepcopy(settings["csp"])
    except KeyError:
        return CSPPolicy({})
    return CSPPolicy(base_policy)
def _connect_src_settings(config) -> list:
    """Assemble the connect-src source list for the CSP header."""
    settings = [
        SELF,
        "https://api.github.com/repos/",
        "https://api.github.com/search/issues",
        "https://*.google-analytics.com",
        "https://*.analytics.google.com",
        "https://*.googletagmanager.com",
        "fastly-insights.com",
        "*.fastly-insights.com",
        "*.ethicalads.io",
        "https://api.pwnedpasswords.com",
        # Scoped deeply to prevent other scripts calling other CDN resources
        "https://cdn.jsdelivr.net/npm/mathjax@3.2.2/es5/sre/mathmaps/",
    ]
    statuspage_url = config.registry.settings.get("statuspage.url")
    if statuspage_url:
        settings.append(statuspage_url)
    if config.registry.settings.get("warehouse.env") == Environment.development:
        livereload_url = config.registry.settings.get("livereload.url")
        parsed_url = parse_url(livereload_url)
        # Incoming scheme could be http or https.
        ws_scheme = "wss" if parsed_url.scheme == "https" else "ws"
        replaced = parsed_url._replace(scheme=ws_scheme)  # noqa
        settings.append(f"{replaced.url}/livereload")
    return settings
def _script_src_settings(config) -> list:
    """Assemble the script-src source list for the CSP header."""
    settings = [
        SELF,
        "https://*.googletagmanager.com",
        "https://www.google-analytics.com",  # Remove when disabling UA
        "https://ssl.google-analytics.com",  # Remove when disabling UA
        "*.fastly-insights.com",
        "*.ethicalads.io",
        # Hash for v1.4.0 of ethicalads.min.js
        "'sha256-U3hKDidudIaxBDEzwGJApJgPEf2mWk6cfMWghrAa6i0='",
        "https://cdn.jsdelivr.net/npm/mathjax@3.2.2/",
        # Hash for v3.2.2 of MathJax tex-svg.js
        "'sha256-1CldwzdEg2k1wTmf7s5RWVd7NMXI/7nxxjJM2C4DqII='",
        # Hash for MathJax inline config
        # See warehouse/templates/packaging/detail.html
        "'sha256-0POaN8stWYQxhzjKS+/eOfbbJ/u4YHO5ZagJvLpMypo='",
    ]
    # In development, also allow the livereload helper script.
    if config.registry.settings.get("warehouse.env") == Environment.development:
        livereload_url = config.registry.settings["livereload.url"]
        settings.append(f"{livereload_url}/livereload.js")
    return settings
def includeme(config):
    """Register the csp service factory, default CSP settings and the tween."""
    config.register_service_factory(csp_factory, name="csp")
    # Enable a Content Security Policy
    config.add_settings(
        {
            "csp": {
                "base-uri": [SELF],
                "block-all-mixed-content": [],
                "connect-src": _connect_src_settings(config),
                "default-src": [NONE],
                "font-src": [SELF, "fonts.gstatic.com"],
                "form-action": [SELF, "https://checkout.stripe.com"],
                "frame-ancestors": [NONE],
                "frame-src": [NONE],
                "img-src": [
                    SELF,
                    config.registry.settings["camo.url"],
                    "https://*.google-analytics.com",
                    "https://*.googletagmanager.com",
                    "*.fastly-insights.com",
                    "*.ethicalads.io",
                ],
                "script-src": _script_src_settings(config),
                "style-src": [
                    SELF,
                    "fonts.googleapis.com",
                    "*.ethicalads.io",
                    # Hashes for inline styles generated by v1.4.0 of ethicalads.min.js
                    "'sha256-2YHqZokjiizkHi1Zt+6ar0XJ0OeEy/egBnlm+MDMtrM='",
                    "'sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU='",
                    # Hashes for inline styles generated by v3.2.2 of MathJax tex-svg.js
                    "'sha256-JLEjeN9e5dGsz5475WyRaoA4eQOdNPxDIeUhclnJDCE='",
                    "'sha256-mQyxHEuwZJqpxCw3SLmc4YOySNKXunyu2Oiz1r3/wAE='",
                    "'sha256-OCf+kv5Asiwp++8PIevKBYSgnNLNUZvxAp4a7wMLuKA='",
                    "'sha256-h5LOiLhk6wiJrGsG5ItM0KimwzWQH/yAcmoJDJL//bY='",
                ],
                "worker-src": ["*.fastly-insights.com"],
            }
        }
    )
    config.add_tween("warehouse.csp.content_security_policy_tween_factory")
298,515 | vina energy term | """
Utilities to score protein-ligand poses using DeepChem.
"""
import numpy as np
def pairwise_distances(coords1: np.ndarray, coords2: np.ndarray) -> np.ndarray:
    """Returns matrix of pairwise Euclidean distances.

    Parameters
    ----------
    coords1: np.ndarray
        A numpy array of shape `(N, 3)`
    coords2: np.ndarray
        A numpy array of shape `(M, 3)`

    Returns
    -------
    np.ndarray
        A `(N, M)` array whose `(i, j)` entry is the distance between
        `coords1[i]` and `coords2[j]`.
    """
    # coords1[:, None] is (N, 1, 3) and coords2[None, :] is (1, M, 3), so the
    # broadcast difference is (N, M, 3) and the reduced result is (N, M).
    # The previous indexing (coords1[None, :] - coords2[:, None]) produced the
    # transposed (M, N) matrix, contradicting the documented shape.
    return np.sum((coords1[:, None] - coords2[None, :])**2, -1)**0.5
def cutoff_filter(d: np.ndarray, x: np.ndarray, cutoff=8.0) -> np.ndarray:
    """Zero out entries of `x` whose matching distance reaches the cutoff.

    Parameters
    ----------
    d: np.ndarray
        Pairwise distances matrix of shape `(N, M)`.
    x: np.ndarray
        Values of shape `(N, M)` to be filtered.
    cutoff: float, optional (default 8)
        Cutoff for selection in Angstroms.

    Returns
    -------
    np.ndarray
        A `(N, M)` array equal to `x` where `d < cutoff` and 0 elsewhere.
    """
    within_range = d < cutoff
    return np.where(within_range, x, np.zeros_like(x))
def vina_nonlinearity(c: np.ndarray, w: float, Nrot: int) -> np.ndarray:
    """Computes the non-linearity used in Vina.

    Scales the interaction terms down by the number of rotatable bonds.

    Parameters
    ----------
    c: np.ndarray
        A numpy array of shape `(N, M)`.
    w: float
        Weighting term.
    Nrot: int
        Number of rotatable bonds in this molecule.

    Returns
    -------
    np.ndarray
        A `(N, M)` array with activations under the nonlinearity.
    """
    divisor = 1 + w * Nrot
    return c / divisor
def vina_repulsion(d: np.ndarray) -> np.ndarray:
    """Computes Autodock Vina's repulsion interaction term.

    Only negative (overlapping) surface distances contribute, quadratically.

    Parameters
    ----------
    d: np.ndarray
        A numpy array of shape `(N, M)`.

    Returns
    -------
    np.ndarray
        A `(N, M)` array with repulsion terms.
    """
    penalties = np.square(d)
    return np.where(d < 0, penalties, np.zeros_like(d))
def vina_hydrophobic(d: np.ndarray) -> np.ndarray:
    """Computes Autodock Vina's hydrophobic interaction term.

    Piecewise linear in the surface distance d (as defined in [1]_):
    1 for d < 0.5, then ramping linearly (1.5 - d) down to 0 at d = 1.5.

    Parameters
    ----------
    d: np.ndarray
        A numpy array of shape `(N, M)`.

    Returns
    -------
    np.ndarray
        A `(N, M)` array of hydrophobic interactions on the piecewise
        linear curve.

    References
    ----------
    .. [1] Jain, Ajay N. "Scoring noncovalent protein-ligand interactions:
        a continuous differentiable function tuned to compute binding affinities."
        Journal of computer-aided molecular design 10.5 (1996): 427-440.
    """
    ones = np.ones_like(d)
    zeros = np.zeros_like(d)
    ramp = 1.5 - d
    return np.where(d < 0.5, ones, np.where(d < 1.5, ramp, zeros))
def vina_hbond(d: np.ndarray) -> np.ndarray:
    """Computes Autodock Vina's hydrogen bond interaction term.

    Piecewise linear in the surface distance d (as defined in [1]_):
    1 for d < -0.7, ramping linearly down to 0 at d = 0, and 0 beyond.

    Parameters
    ----------
    d: np.ndarray
        A numpy array of shape `(N, M)`.

    Returns
    -------
    np.ndarray
        A `(N, M)` array of hydrogen-bond interactions on the piecewise
        linear curve.

    References
    ----------
    .. [1] Jain, Ajay N. "Scoring noncovalent protein-ligand interactions:
        a continuous differentiable function tuned to compute binding affinities."
        Journal of computer-aided molecular design 10.5 (1996): 427-440.
    """
    slope = 1.0 / 0.7
    ramp = slope * (0 - d)
    return np.where(d < -0.7, np.ones_like(d),
                    np.where(d < 0, ramp, np.zeros_like(d)))
def vina_gaussian_first(d: np.ndarray) -> np.ndarray:
    """Computes Autodock Vina's first Gaussian interaction term.

    A Gaussian centered at d = 0 with width 0.5, d being the surface
    distance as defined in [1]_.

    Parameters
    ----------
    d: np.ndarray
        A numpy array of shape `(N, M)`.

    Returns
    -------
    np.ndarray
        A `(N, M)` array of gaussian interaction terms.

    References
    ----------
    .. [1] Jain, Ajay N. "Scoring noncovalent protein-ligand interactions:
        a continuous differentiable function tuned to compute binding affinities."
        Journal of computer-aided molecular design 10.5 (1996): 427-440.
    """
    scaled = d / 0.5
    return np.exp(-scaled**2)
def vina_gaussian_second(d: np.ndarray) -> np.ndarray:
    """Computes Autodock Vina's second Gaussian interaction term.

    A Gaussian centered at d = 3 with width 2, d being the surface
    distance as defined in [1]_.

    Parameters
    ----------
    d: np.ndarray
        A numpy array of shape `(N, M)`.

    Returns
    -------
    np.ndarray
        A `(N, M)` array of gaussian interaction terms.

    References
    ----------
    .. [1] Jain, Ajay N. "Scoring noncovalent protein-ligand interactions:
        a continuous differentiable function tuned to compute binding affinities."
        Journal of computer-aided molecular design 10.5 (1996): 427-440.
    """
    offset = (d - 3) / 2
    return np.exp(-offset**2)
def weighted_linear_sum(w: np.ndarray, x: np.ndarray) -> np.ndarray:
    """Computes the weighted linear combination of the leading axis of `x`.

    Parameters
    ----------
    w: np.ndarray
        A numpy array of shape `(N,)`.
    x: np.ndarray
        A numpy array of shape `(N, M, L)`.

    Returns
    -------
    np.ndarray
        A numpy array of shape `(M, L)`: sum_i w[i] * x[i].
    """
    # Contract the single axis of w against the first axis of x.
    return np.tensordot(w, x, axes=([0], [0]))
def METHOD_NAME(coords1: np.ndarray, coords2: np.ndarray,
                weights: np.ndarray, wrot: float, Nrot: int) -> np.ndarray:
    """Computes the Vina Energy function for two molecular conformations

    Parameters
    ----------
    coords1: np.ndarray
        Molecular coordinates of shape `(N, 3)`
    coords2: np.ndarray
        Molecular coordinates of shape `(M, 3)`
    weights: np.ndarray
        A numpy array of shape `(5,)`. The 5 values are weights for repulsion interaction term,
        hydrophobic interaction term, hydrogen bond interaction term,
        first Gaussian interaction term and second Gaussian interaction term.
    wrot: float
        The scaling factor for nonlinearity
    Nrot: int
        Number of rotatable bonds in this calculation

    Returns
    -------
    np.ndarray
        A scalar value with free energy
    """
    # TODO(rbharath): The autodock vina source computes surface distances
    # which take into account the van der Waals radius of each atom type.
    dists = pairwise_distances(coords1, coords2)
    # One matrix per Vina interaction term, each with one entry per
    # (coords1 atom, coords2 atom) pair.
    repulsion = vina_repulsion(dists)
    hydrophobic = vina_hydrophobic(dists)
    hbond = vina_hbond(dists)
    gauss_1 = vina_gaussian_first(dists)
    gauss_2 = vina_gaussian_second(dists)
    # Weighted combination of the five terms, same shape as dists.
    interactions = weighted_linear_sum(
        weights, np.array([repulsion, hydrophobic, hbond, gauss_1, gauss_2]))
    # Atom pairs beyond the distance cutoff contribute nothing.
    thresholded = cutoff_filter(dists, interactions)
    # Scale by the rotatable-bond nonlinearity, then reduce to a scalar.
    free_energies = vina_nonlinearity(thresholded, wrot, Nrot)
    return np.sum(free_energies)
298,516 | on 200 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
# Generated by aaz-dev-tools; keep hand edits minimal, regeneration overwrites.
@register_command(
    "monitor log-analytics query-pack show",
    is_preview=True,
)
class Show(AAZCommand):
    """Show a log analytics query pack.

    :example: Show a query pack
        az monitor log-analytics query-pack show -g resourceGroup -n queryPackName
    """

    # API version and ARM resource id template handled by this command.
    _aaz_info = {
        "version": "2019-09-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.operationalinsights/querypacks/{}", "2019-09-01"],
        ]
    }

    def _handler(self, command_args):
        # Entry point: let the base class bind args, run the GET, emit output.
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    # Cached argument schema, built once per process.
    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Build (and cache) the CLI argument schema for this command.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.query_pack_name = AAZStrArg(
            options=["-n", "--name", "--query-pack-name"],
            help="The name of the log analytics query pack.",
            required=True,
            id_part="name",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        # pre/post hooks wrap the single HTTP GET operation.
        self.pre_operations()
        self.QueryPacksGet(ctx=self.ctx)()
        self.post_operations()

    # @register_callback
    def pre_operations(self):
        # Hook before the HTTP operation; intentionally empty.
        pass

    # @register_callback
    def post_operations(self):
        # Hook after the HTTP operation; intentionally empty.
        pass

    def _output(self, *args, **kwargs):
        # Serialize the fetched resource (the "instance" context variable).
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class QueryPacksGet(AAZHttpOperation):
        # GET of a single query pack resource.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.METHOD_NAME(session)
            # Any non-200 response is translated into a CLI error.
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/queryPacks/{queryPackName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            # Path parameters substituted into the url template above.
            parameters = {
                **self.serialize_url_param(
                    "queryPackName", self.ctx.args.query_pack_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2019-09-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def METHOD_NAME(self, session):
            # 200 handler: deserialize the body into the "instance" context var.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        # Cached response schema, built once per process.
        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            if cls._schema_on_200 is not None:
                return cls._schema_on_200
            cls._schema_on_200 = AAZObjectType()
            _schema_on_200 = cls._schema_on_200
            _schema_on_200.id = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.location = AAZStrType(
                flags={"required": True},
            )
            _schema_on_200.name = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.properties = AAZObjectType(
                flags={"required": True, "client_flatten": True},
            )
            _schema_on_200.tags = AAZDictType()
            _schema_on_200.type = AAZStrType(
                flags={"read_only": True},
            )
            properties = cls._schema_on_200.properties
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
                flags={"read_only": True},
            )
            properties.query_pack_id = AAZStrType(
                serialized_name="queryPackId",
                flags={"read_only": True},
            )
            properties.time_created = AAZStrType(
                serialized_name="timeCreated",
                flags={"read_only": True},
            )
            properties.time_modified = AAZStrType(
                serialized_name="timeModified",
                flags={"read_only": True},
            )
            tags = cls._schema_on_200.tags
            tags.Element = AAZStrType()
            return cls._schema_on_200


__all__ = ["Show"]
298,517 | populate regions | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (C) 2010-2023 GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (https://www.globalquakemodel.org/tools-products) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# (hazard@globalquakemodel.org).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
'''
:mod:`openquake.hmtk.regionalisation.tectonic_regionalisation` implements
:class:`openquake.hmtk.ancillary.tectonic_regionalisation.TectonicRegion`,
defining the methods and attributes associated with a region, and the
:class:`openquake.hmtk.ancillary.tectonic_regionalisation.TectonicRegionalisation` defining a regionalisation as a set of regions
'''
from math import fabs
import numpy as np
from openquake.hazardlib.scalerel.wc1994 import WC1994
DEFAULT_SHEAR_MODULUS = [(30.0, 1.0)]
DEFAULT_DLR = [(1.25E-5, 1.0)]
DEFAULT_MSR = [(WC1994(), 1.0)]
def _check_list_weights(parameter, name):
'''
Checks that the weights in a list of tuples sums to 1.0
'''
if not isinstance(parameter, list):
raise ValueError('%s must be formatted with a list of tuples' % name)
weight = np.sum([val[1] for val in parameter])
if fabs(weight - 1.) > 1E-8:
raise ValueError('%s weights do not sum to 1.0!' % name)
return parameter
class TectonicRegion(object):
    '''
    Definition of the tectonic region
    '''

    def __init__(self, identifier, name, shear_modulus=None,
                 disp_length_ratio=None, scaling_rel=None):
        '''
        Instantiate the region; each (value, weight) list is validated and
        missing/empty inputs fall back to the module defaults.
        '''
        self.id = identifier
        self.region_name = name
        # "or" (not "is None") so that empty lists also take the defaults
        self.shear_modulus = _check_list_weights(
            shear_modulus or DEFAULT_SHEAR_MODULUS,
            'Shear Modulus ' + self.region_name)
        self.disp_length_ratio = _check_list_weights(
            disp_length_ratio or DEFAULT_DLR,
            'Displacement to Length Ratio ' + self.region_name)
        self.scaling_rel = _check_list_weights(
            scaling_rel or DEFAULT_MSR,
            'Scaling Relation ' + self.region_name)
class TectonicRegionalisation(object):
    '''
    Defines a set of regionalisations
    '''

    def __init__(self):
        '''
        Instantiate with an empty set of regions
        '''
        # Ordered list of TectonicRegion instances
        self.regionalisation = []
        # Region names, parallel to ``regionalisation``
        self.key_list = []

    def METHOD_NAME(self, tectonic_region_dict):
        '''
        Populates the tectonic region from the list of dictionaries, where each
        region is a dictionary of with the following format::
            region = {'Shear_Modulus': [(val1, weight1), (val2, weight2), ...],
                      'Displacement_Length_Ratio': [(val1, weight1), ...],
                      'Magnitude_Scaling_Relation': [(val1, weight1), ...]}
        '''
        for tect_reg in tectonic_region_dict:
            # dict.get with a default replaces the former
            # "'key' in dict.keys()" membership tests
            shear_modulus = tect_reg.get('Shear_Modulus', DEFAULT_SHEAR_MODULUS)
            disp_length_ratio = tect_reg.get(
                'Displacement_Length_Ratio', DEFAULT_DLR)
            scaling_relation = tect_reg.get(
                'Magnitude_Scaling_Relation', DEFAULT_MSR)
            self.regionalisation.append(
                TectonicRegion(
                    tect_reg['Code'], tect_reg['Name'],
                    shear_modulus, disp_length_ratio, scaling_relation))
            self.key_list.append(tect_reg['Name'])

    def get_number_regions(self):
        '''
        Returns the number of tectonic regions in a regionalisation
        '''
        return len(self.key_list)
298,518 | get speed tolerance | #!/usr/bin/env python
#############################################################################
# Celestica
#
# Module contains an implementation of SONiC Platform Base API and
# provides the fan status which are available in the platform
#
#############################################################################
import math
import os.path
try:
from sonic_platform_base.fan_base import FanBase
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
FAN_PATH = "/sys/class/hwmon/hwmon1/"
FAN_MAX_PWM = 255
FAN_FAN_PWM = "pwm{}"
FAN_FAN_INPUT = "fan{}_input"
FAN_MAX_RPM = 9000
FAN_NAME_LIST = ["FAN-1", "FAN-2", "FAN-3", "FAN-4"]
class Fan(FanBase):
    """Platform-specific Fan class"""

    def __init__(self, fan_tray_index, fan_index=0):
        # fan_index selects the pwm{N}/fan{N}_input sysfs attributes below
        self.fan_index = fan_index
        self.fan_tray_index = fan_tray_index
        FanBase.__init__(self)

    def __read_txt_file(self, file_path):
        # Best-effort read of a sysfs attribute; returns "" on any IOError.
        try:
            with open(file_path, 'r') as fd:
                data = fd.read()
                return data.strip()
        except IOError:
            pass
        return ""

    def __write_txt_file(self, file_path, value):
        # Best-effort write of a sysfs attribute; False on any IOError.
        try:
            with open(file_path, 'w') as fd:
                fd.write(str(value))
        except IOError:
            return False
        return True

    def __search_file_by_name(self, directory, file_name):
        # Walk the hwmon tree looking for the attribute file.
        # NOTE(review): the test is `name in file_name` (substring), not
        # equality — a file whose name is a substring of the target would
        # also match; confirm exact matching is not required here.
        for dirpath, dirnames, files in os.walk(directory):
            for name in files:
                file_path = os.path.join(dirpath, name)
                if name in file_name:
                    return file_path
        return None

    def get_direction(self):
        """
        Retrieves the direction of fan
        Returns:
            A string, either FAN_DIRECTION_INTAKE or FAN_DIRECTION_EXHAUST
            depending on fan direction
        """
        # Direction is fixed for this platform.
        direction = self.FAN_DIRECTION_EXHAUST
        return direction

    def get_speed(self):
        """
        Retrieves the speed of fan as a percentage of full speed
        Returns:
            An integer, the percentage of full fan speed, in the range 0 (off)
            to 100 (full speed)
        Note:
            speed = pwm_in/255*100
        """
        speed = 0
        fan_speed_sysfs_name = "fan{}_input".format(self.fan_index+1)
        fan_speed_sysfs_path = self.__search_file_by_name(
            FAN_PATH, fan_speed_sysfs_name)
        # Missing sysfs entry reads as 0 RPM rather than raising.
        fan_speed_rpm = self.__read_txt_file(fan_speed_sysfs_path) or 0
        speed = math.ceil(float(fan_speed_rpm) * 100 / FAN_MAX_RPM)
        return int(speed)

    def get_target_speed(self):
        """
        Retrieves the target (expected) speed of the fan
        Returns:
            An integer, the percentage of full fan speed, in the range 0 (off)
            to 100 (full speed)
        Note:
            speed_pc = pwm_target/255*100
            0   : when PWM mode is use
            pwm : when pwm mode is not use
        """
        # NOTE(review): the pwm-based implementation is commented out below;
        # this currently returns the *measured* speed (same as get_speed),
        # not the configured target — confirm this is intentional.
        # target = 0
        # fan_target_sysfs_name = "pwm{}".format(self.fan_index+1)
        # fan_target_sysfs_path = self.__search_file_by_name(
        #     FAN_PATH, fan_target_sysfs_name)
        # fan_target_pwm = self.__read_txt_file(fan_target_sysfs_path) or 0
        # target = math.ceil(float(fan_target_pwm) * 100 / FAN_MAX_PWM)
        # return target
        speed = 0
        fan_speed_sysfs_name = "fan{}_input".format(self.fan_index+1)
        fan_speed_sysfs_path = self.__search_file_by_name(
            FAN_PATH, fan_speed_sysfs_name)
        fan_speed_rpm = self.__read_txt_file(fan_speed_sysfs_path) or 0
        speed = math.ceil(float(fan_speed_rpm) * 100 / FAN_MAX_RPM)
        return speed

    def METHOD_NAME(self):
        """
        Retrieves the speed tolerance of the fan
        Returns:
            An integer, the percentage of variance from target speed which is
            considered tolerable
        """
        return 10

    def set_speed(self, speed):
        """
        Sets the fan speed
        Args:
            speed: An integer, the percentage of full fan speed to set fan to,
                   in the range 0 (off) to 100 (full speed)
        Returns:
            A boolean, True if speed is set successfully, False if not
        Note:
            Depends on pwm or target mode is selected:
            1) pwm = speed_pc * 255             <-- Currently use this mode.
            2) target_pwm = speed_pc * 100 / 255
               2.1) set pwm{}_enable to 3
        """
        # Convert percentage to the 0-255 pwm scale before writing.
        pwm = speed * 255 / 100
        fan_target_sysfs_name = "pwm{}".format(self.fan_index+1)
        fan_target_sysfs_path = self.__search_file_by_name(
            FAN_PATH, fan_target_sysfs_name)
        return self.__write_txt_file(fan_target_sysfs_path, int(pwm))

    def set_status_led(self, color):
        """
        Sets the state of the fan module status LED
        Args:
            color: A string representing the color with which to set the
                   fan module status LED
        Returns:
            bool: always True
        """
        # This platform exposes no fan status LED control.
        return True

    def get_name(self):
        """
        Retrieves the name of the device
        Returns:
            string: The name of the device
        """
        fan_name = FAN_NAME_LIST[self.fan_index]
        return fan_name

    def get_presence(self):
        """
        Retrieves the presence of the FAN
        Returns:
            bool: always True
        """
        return True

    def get_status(self):
        """
        Retrieves the status of the FAN
        Returns:
            bool: always True
        """
        return True
298,519 | on clean started | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Automatic updates of items exported to the Kodi library
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from datetime import datetime, timedelta
import xbmc
from resources.lib.globals import G
import resources.lib.common as common
from resources.lib.kodi.library_utils import get_library_path
from resources.lib.utils.logging import LOG
class LibraryUpdateService(xbmc.Monitor):
"""
Checks if a library update is scheduled and triggers it
"""
    def __init__(self):
        super().__init__()
        try:
            # Auto-update is active only when library export is enabled and
            # the update mode is 0 or 2 (mode meanings are defined by the
            # add-on settings — confirm against settings.xml)
            self.enabled = (G.ADDON.getSettingBool('lib_enabled')
                            and G.ADDON.getSettingInt('lib_auto_upd_mode') in [0, 2])
        except Exception:  # pylint: disable=broad-except
            # If settings.xml was not created yet, as at first service run
            # G.ADDON.getSettingInt('lib_auto_upd_mode') will thrown a TypeError
            self.enabled = False
        # Baseline for measuring Kodi idle time (see is_idle)
        self.startidle = 0
        self.next_schedule = _compute_next_schedule()
        # Request library update variables
        self.scan_in_progress = False
        self.scan_awaiting = False
        self.clean_in_progress = False
        self.clean_awaiting = False
        # Other components request Kodi library scans/cleans through this slot
        common.register_slot(self.request_kodi_library_update, common.Signals.REQUEST_KODI_LIBRARY_UPDATE,
                             is_signal=True)
def on_service_tick(self):
"""Check if update is due and trigger it"""
if not self.enabled or self.next_schedule is None:
return
if self.next_schedule <= datetime.now() and self.is_idle():
# Check if the schedule value is changed
# (when a manual update/full sync is done, we avoid to perform again the update)
self.next_schedule = _compute_next_schedule()
if self.next_schedule >= datetime.now():
return
LOG.debug('Triggering auto update library')
# Send signal to nfsession to run the library auto update
common.send_signal('library_auto_update')
# Compute the next schedule
self.next_schedule = _compute_next_schedule(datetime.now())
    def is_idle(self):
        """
        Check if Kodi has been idle for 5 minutes
        """
        try:
            # When the "wait for idle" setting is off, always report idle
            if not G.ADDON.getSettingBool('lib_auto_upd_wait_idle'):
                return True
        except TypeError:
            # Could happen when the service tick is executed at the same time when the settings are written
            return False
        lastidle = xbmc.getGlobalIdleTime()
        # While playing, keep moving the baseline so playback time never
        # counts as idle time
        if xbmc.Player().isPlaying():
            self.startidle = lastidle
        # The global idle counter is smaller than our baseline — presumably
        # it was reset (e.g. user activity); restart measuring from zero
        if lastidle < self.startidle:
            self.startidle = 0
        idletime = lastidle - self.startidle
        # 300 seconds = 5 minutes of idle time required
        return idletime >= 300
def onSettingsChanged(self):
"""
As settings changed, we will compute next schedule again to ensure it's still correct
"""
# Wait for slow system (like Raspberry Pi) to write the settings
xbmc.sleep(500)
# Check if the status is changed
self.enabled = (G.ADDON.getSettingBool('lib_enabled')
and G.ADDON.getSettingInt('lib_auto_upd_mode') in [0, 2])
# Then compute the next schedule
if self.enabled:
self.next_schedule = _compute_next_schedule()
def onScanStarted(self, library):
    """Track the start of a Kodi video-library scan (from any add-on)."""
    if library != 'video':
        return
    self.scan_in_progress = True
def onScanFinished(self, library):
    """Track the end of a Kodi video-library scan and run queued operations."""
    if library != 'video':
        return
    self.scan_in_progress = False
    self.check_awaiting_operations()
def METHOD_NAME(self, library):
    """Track the start of a Kodi video-library clean (from any add-on)."""
    if library != 'video':
        return
    self.clean_in_progress = True
def onCleanFinished(self, library):
    """Track the end of a Kodi video-library clean and run queued operations."""
    if library != 'video':
        return
    self.clean_in_progress = False
    self.check_awaiting_operations()
def request_kodi_library_update(self, clean=False, scan=False):
    """Make a request for scan/clean the Kodi library database.

    :param clean: request a library clean
    :param scan: request a library scan
    """
    # Kodi library scan/clean has some issues (Kodi 18/19), for example:
    # - If more than one scan calls will be performed, the last call cancel the previous scan
    # - If a clean is in progress, a new scan/clean call will be ignored
    # To manage these problems we monitor the events to check if a scan/clean is currently in progress
    # (from this or others add-ons) and delay the call until the current scan/clean will be finished.
    if clean:
        self.start_clean_kodi_library()
    if scan:
        self.start_update_kodi_library()
def check_awaiting_operations(self):
    """Start any scan/clean that was queued while another operation was running."""
    queued = (
        (self.clean_awaiting, 'Kodi library clean requested (from awaiting)',
         self.start_clean_kodi_library),
        (self.scan_awaiting, 'Kodi library scan requested (from awaiting)',
         self.start_update_kodi_library),
    )
    for awaiting, message, start_operation in queued:
        if awaiting:
            LOG.debug(message)
            start_operation()
def start_update_kodi_library(self):
    """Run a Kodi library scan now, or queue it if a scan/clean is running."""
    if self.scan_in_progress or self.clean_in_progress:
        # Another scan/clean is active: remember the request for later
        self.scan_awaiting = True
        return
    LOG.debug('Start Kodi library scan')
    # Mark as in progress immediately; the "started" callback can arrive late
    self.scan_in_progress = True
    self.scan_awaiting = False
    common.scan_library(get_library_path())
def start_clean_kodi_library(self):
    """Run a Kodi library clean now, or queue it if a scan/clean is running."""
    if self.scan_in_progress or self.clean_in_progress:
        # Another scan/clean is active: remember the request for later
        self.clean_awaiting = True
        return
    LOG.debug('Start Kodi library clean')
    # Mark as in progress immediately; the "started" callback can arrive late
    self.clean_in_progress = True
    self.clean_awaiting = False
    common.clean_library(False, get_library_path())
def _compute_next_schedule(date_last_start=None):
    """Compute the datetime of the next library auto-update.

    :param date_last_start: base datetime for the computation; when None the
        last recorded auto-update start time is used (epoch when never run).
    :return: the next scheduled datetime, or None when this device must not
        run the update or when the settings could not be read.
    """
    try:
        if G.ADDON.getSettingBool('use_mysql'):
            # With a shared (MySQL) library only one designated device runs
            # the auto update
            client_uuid = G.LOCAL_DB.get_value('client_uuid')
            uuid = G.SHARED_DB.get_value('auto_update_device_uuid')
            if client_uuid != uuid:
                LOG.debug('The auto update has been disabled because another device '
                          'has been set as the main update manager')
                return None
        last_run = date_last_start or G.SHARED_DB.get_value('library_auto_update_last_start',
                                                            datetime.utcfromtimestamp(0))
        if G.ADDON.getSettingInt('lib_auto_upd_mode') == 0:  # Update at Kodi startup
            time = '00:00'
            update_frequency = 0
        else:
            time = G.ADDON.getSetting('lib_auto_upd_start') or '00:00'
            update_frequency = G.ADDON.getSettingInt('lib_auto_upd_freq')
        # Anchor the run to the configured HH:MM, then add the frequency
        # (setting index maps to 1/2/5/7 days)
        last_run = last_run.replace(hour=int(time[0:2]), minute=int(time[3:5]))
        next_run = last_run + timedelta(days=[1, 2, 5, 7][update_frequency])
        if next_run >= datetime.now():
            LOG.info('Next library auto update is scheduled for {}', next_run)
        return next_run
    except Exception:  # pylint: disable=broad-except
        # If settings.xml was not created yet, as at first service run
        # G.ADDON.getSettingBool('use_mysql') will thrown a TypeError
        # If any other error appears, we don't want the service to crash,
        # let's return None in all case
        # import traceback
        # LOG.debug(traceback.format_exc())
        LOG.warn('Managed error at _compute_next_schedule')
        return None
298,520 | mean filter | import contextlib
from collections import ChainMap
from time import perf_counter as clock
import cupy as cp
import numpy as np
import pandas as pd
from cupyx.scipy.ndimage.filters import convolve as cp_convolve
from scipy.ndimage import convolve as sp_convolve
from dask import array as da
from dask.distributed import performance_report, wait
from dask.utils import format_bytes, parse_bytes
from dask_cuda.benchmarks.common import Config, execute_benchmark
from dask_cuda.benchmarks.utils import (
as_noop,
parse_benchmark_args,
print_key_value,
print_separator,
print_throughput_bandwidth,
)
def METHOD_NAME(a, shape):
    """Apply a mean filter (uniform box kernel of the given shape) to `a`.

    Dispatches to the CuPy convolution for GPU arrays and to the SciPy
    convolution otherwise.
    """
    kernel = np.full_like(a, 1.0 / np.prod(shape), shape=shape)
    convolve = cp_convolve if isinstance(a, cp.ndarray) else sp_convolve
    return convolve(a, kernel)
def bench_once(client, args, write_profile=None):
    """Run one timed map_overlap mean-filter pass; return (bytes, seconds).

    The random input array is persisted (and waited on) before timing so the
    measurement covers only the filtering work, not data generation.
    """
    # Create a simple random array
    if args.type == "gpu":
        rs = da.random.RandomState(RandomState=cp.random.RandomState)
    else:
        rs = da.random.RandomState(RandomState=np.random.RandomState)
    x = rs.random((args.size, args.size), chunks=args.chunk_size).persist()
    # Square kernel shape (2k+1, 2k+1); k is the overlap depth used below
    ks = 2 * (2 * args.kernel_size + 1,)
    wait(x)
    data_processed = x.nbytes
    # Execute the operations to benchmark
    if args.profile is not None and write_profile is not None:
        ctx = performance_report(filename=args.profile)
    else:
        ctx = contextlib.nullcontext()
    with ctx:
        result = x.map_overlap(METHOD_NAME, args.kernel_size, shape=ks)
        if args.backend == "dask-noop":
            # Replace tasks with no-ops to measure pure scheduling overhead
            result = as_noop(result)
        t1 = clock()
        wait(client.persist(result))
        took = clock() - t1
    return (data_processed, took)
def pretty_print_results(args, address_to_index, p2p_bw, results):
    """Print a human-readable summary of the benchmark configuration and runs.

    When --markdown is set, the configuration section is fenced so it renders
    verbatim.
    """
    if args.markdown:
        print("```")
    print("Cupy map overlap benchmark")
    print_separator(separator="-")
    print_key_value(key="Backend", value=f"{args.backend}")
    print_key_value(key="Array type", value="cupy" if args.type == "gpu" else "numpy")
    print_key_value(key="Size", value=f"{args.size}*{args.size}")
    print_key_value(key="Chunk size", value=f"{args.chunk_size}")
    print_key_value(key="Ignore size", value=f"{format_bytes(args.ignore_size)}")
    print_key_value(key="Kernel size", value=f"{args.kernel_size}")
    print_key_value(key="Device(s)", value=f"{args.devs}")
    if args.device_memory_limit:
        print_key_value(
            key="Device memory limit",
            value=f"{format_bytes(args.device_memory_limit)}",
        )
    print_key_value(key="RMM Pool", value=f"{not args.disable_rmm_pool}")
    print_key_value(key="Protocol", value=f"{args.protocol}")
    # UCX-specific transports are only meaningful for the ucx protocol
    if args.protocol == "ucx":
        print_key_value(key="TCP", value=f"{args.enable_tcp_over_ucx}")
        print_key_value(key="InfiniBand", value=f"{args.enable_infiniband}")
        print_key_value(key="NVLink", value=f"{args.enable_nvlink}")
    print_key_value(key="Worker thread(s)", value=f"{args.threads_per_worker}")
    data_processed, durations = zip(*results)
    if args.markdown:
        print("\n```")
    print_throughput_bandwidth(
        args, durations, data_processed, p2p_bw, address_to_index
    )
def create_tidy_results(args, p2p_bw, results):
    """Build a tidy (one row per run) DataFrame of timings plus the run config.

    Returns a tuple of (timing DataFrame, peer-to-peer bandwidth data).
    """
    configuration = {
        "array_type": "cupy" if args.type == "gpu" else "numpy",
        "backend": args.backend,
        "user_size": args.size,
        "chunk_size": args.chunk_size,
        "ignore_size": args.ignore_size,
        "devices": args.devs,
        "device_memory_limit": args.device_memory_limit,
        "worker_threads": args.threads_per_worker,
        "rmm_pool": not args.disable_rmm_pool,
        "protocol": args.protocol,
        "tcp": args.enable_tcp_over_ucx,
        "ib": args.enable_infiniband,
        "nvlink": args.enable_nvlink,
        "nreps": args.runs,
        "kernel_size": args.kernel_size,
    }
    # One row per run: the shared configuration overlaid with that run's timing
    rows = [
        pd.Series(
            data=ChainMap(
                configuration,
                {"wallclock": duration, "data_processed": nbytes},
            )
        )
        for nbytes, duration in results
    ]
    return pd.DataFrame(rows), p2p_bw
def parse_args():
    """Parse the benchmark CLI arguments (shared harness options plus the
    map-overlap specific ones declared below)."""
    special_args = [
        {
            "name": [
                "-s",
                "--size",
            ],
            "default": "10000",
            "metavar": "n",
            "type": int,
            "help": "The size n in n^2 (default 10000)",
        },
        {
            "name": [
                "-t",
                "--type",
            ],
            "choices": ["cpu", "gpu"],
            "default": "gpu",
            "type": str,
            "help": "Use GPU or CPU arrays",
        },
        {
            "name": [
                "-c",
                "--chunk-size",
            ],
            "default": "128 MiB",
            "metavar": "nbytes",
            "type": str,
            "help": "Chunk size (default '128 MiB')",
        },
        {
            "name": [
                "-k",
                "--kernel-size",
            ],
            "default": "1",
            "metavar": "k",
            "type": int,
            "help": "Kernel size, 2*k+1, in each dimension (default 1)",
        },
        {
            "name": "--ignore-size",
            "default": "1 MiB",
            "metavar": "nbytes",
            "type": parse_bytes,
            "help": "Ignore messages smaller than this (default '1 MB')",
        },
        {
            "name": "--runs",
            "default": 3,
            "type": int,
            "help": "Number of runs",
        },
        {
            "name": [
                "-b",
                "--backend",
            ],
            "choices": ["dask", "dask-noop"],
            "default": "dask",
            "type": str,
            "help": "Compute backend to use.",
        },
    ]
    # The description previously read "Transpose on LocalCUDACluster benchmark",
    # a copy/paste from another benchmark; this file benchmarks cupy map_overlap.
    return parse_benchmark_args(
        description="Cupy map overlap on LocalCUDACluster benchmark",
        args_list=special_args,
    )
if __name__ == "__main__":
    # Script entry point: parse CLI args and delegate the run/report loop to
    # the shared dask_cuda benchmark harness.
    execute_benchmark(
        Config(
            args=parse_args(),
            bench_once=bench_once,
            create_tidy_results=create_tidy_results,
            pretty_print_results=pretty_print_results,
        )
    )
298,521 | main | #!/usr/bin/env python3
import logging
import re
from typing import Union, List
from helpermodules.cli import run_using_positional_cli_args
from modules.common import req
from modules.common.abstract_device import DeviceDescriptor
from modules.common.configurable_device import ConfigurableDevice, ComponentFactoryByType, IndependentComponentUpdater
from modules.devices.http.bat import HttpBat
from modules.devices.http.config import HTTP, HTTPConfiguration, HttpBatSetup, HttpCounterSetup, HttpInverterSetup, \
HttpBatConfiguration, HttpCounterConfiguration, HttpInverterConfiguration
from modules.devices.http.counter import HttpCounter
from modules.devices.http.inverter import HttpInverter
log = logging.getLogger(__name__)
def create_device(device_config: HTTP):
    """Build a ConfigurableDevice with HTTP bat/counter/inverter components.

    Each component factory closes over the device-level base URL; components
    are updated independently, sharing one HTTP session.
    """
    def create_bat_component(component_config: HttpBatSetup):
        return HttpBat(device_config.id, component_config, device_config.configuration.url)

    def create_counter_component(component_config: HttpCounterSetup):
        return HttpCounter(device_config.id, component_config, device_config.configuration.url)

    def create_inverter_component(component_config: HttpInverterSetup):
        return HttpInverter(device_config.id, component_config, device_config.configuration.url)

    session = req.get_http_session()
    return ConfigurableDevice(
        device_config=device_config,
        component_factory=ComponentFactoryByType(
            bat=create_bat_component,
            counter=create_counter_component,
            inverter=create_inverter_component,
        ),
        component_updater=IndependentComponentUpdater(lambda component: component.update(session))
    )
def create_paths_dict(**kwargs):
    """Split absolute URLs into a common host and their individual paths.

    Each keyword argument is an absolute HTTP/HTTPS URL; the values "none"
    and "" mean "not configured" and are skipped. All URLs must share the
    same scheme+host.

    :return: dict mapping each configured keyword to its URL path component
    :raises ValueError: if a URL is not an absolute HTTP/HTTPS URL, or the
        URLs disagree on scheme/host
    """
    regex = re.compile("^(https?://[^/]+)(.*)")
    result = {}
    host_scheme = None
    for key, path in kwargs.items():
        if path in ("none", ""):
            continue
        match = regex.search(path)
        if match is None:
            # ValueError (an Exception subclass) keeps existing broad handlers
            # working while letting new callers catch the specific type
            raise ValueError("Invalid URL <" + path + ">: Absolute HTTP or HTTPS URL required")
        if host_scheme is None:
            host_scheme = match.group(1)
        elif host_scheme != match.group(1):
            raise ValueError("All URLs must have the same scheme and host. However URLs are: " + str(kwargs))
        result[key] = match.group(2)
    return result
def run_device_legacy(device_config: HTTP, component_config: Union[HttpBatSetup, HttpCounterSetup, HttpInverterSetup]):
    """Create a device with a single component and perform one update cycle."""
    device = create_device(device_config)
    device.add_component(component_config)
    log.debug("HTTP Configuration: %s, Component Configuration: %s", device_config, component_config)
    device.update()
def create_legacy_device_config(url: str):
    """Derive the device config (scheme+host only) from a full component URL.

    :raises Exception: if `url` is not an absolute HTTP/HTTPS URL
    """
    regex = re.compile("^(https?://[^/]+)(.*)")
    match = regex.search(url)
    if match is None:
        raise Exception("Invalid URL <" + url + ">: Absolute HTTP or HTTPS URL required")
    host_scheme = match.group(1)
    device_config = HTTP(configuration=HTTPConfiguration(url=host_scheme))
    return device_config
def read_legacy_bat(power_path: str, imported_path: str, exported_path: str, soc_path: str) -> None:
    """Legacy CLI entry: read battery values from the given measurement URLs."""
    component_config = HttpBatSetup(configuration=HttpBatConfiguration(**create_paths_dict(
        power_path=power_path,
        imported_path=imported_path,
        exported_path=exported_path,
        soc_path=soc_path,
    )))
    # The device config (scheme+host) is derived from the power URL
    run_device_legacy(create_legacy_device_config(power_path), component_config)
def read_legacy_counter(power_path: str, imported_path: str, exported_path: str, current_l1_path: str,
                        current_l2_path: str, current_l3_path: str):
    """Legacy CLI entry: read grid counter values from the given URLs."""
    component_config = HttpCounterSetup(configuration=HttpCounterConfiguration(**create_paths_dict(
        power_path=power_path,
        imported_path=imported_path,
        exported_path=exported_path,
        current_l1_path=current_l1_path,
        current_l2_path=current_l2_path,
        current_l3_path=current_l3_path,
    )))
    # The device config (scheme+host) is derived from the power URL
    run_device_legacy(create_legacy_device_config(power_path), component_config)
def read_legacy_inverter(power_path: str, exported_path: str, num: int):
    """Legacy CLI entry: read inverter values; `num` is the component id."""
    component_config = HttpInverterSetup(id=num, configuration=HttpInverterConfiguration(**create_paths_dict(
        power_path=power_path,
        exported_path=exported_path,
    )))
    # The device config (scheme+host) is derived from the power URL
    run_device_legacy(create_legacy_device_config(power_path), component_config)
def METHOD_NAME(argv: List[str]):
    """CLI dispatcher: the first positional argument selects the reader."""
    run_using_positional_cli_args(
        {"bat": read_legacy_bat, "counter": read_legacy_counter, "inverter": read_legacy_inverter}, argv
    )
# Exposes this module's device type to the configuration framework
device_descriptor = DeviceDescriptor(configuration_factory=HTTP)
298,522 | add owner | from typing import Dict, Generic, List, Optional, TypeVar, Union
from urllib.parse import quote
from datahub.emitter.mcp_patch_builder import MetadataPatchProposal
from datahub.metadata.schema_classes import (
DatasetPropertiesClass as DatasetProperties,
EditableDatasetPropertiesClass as EditableDatasetProperties,
EditableSchemaMetadataClass as EditableSchemaMetadata,
GlobalTagsClass as GlobalTags,
GlossaryTermAssociationClass as Term,
GlossaryTermsClass as GlossaryTerms,
KafkaAuditHeaderClass,
OwnerClass as Owner,
OwnershipTypeClass,
SchemaMetadataClass as SchemaMetadata,
SystemMetadataClass,
TagAssociationClass as Tag,
UpstreamClass as Upstream,
UpstreamLineageClass as UpstreamLineage,
)
from datahub.specific.custom_properties import CustomPropertiesPatchHelper
from datahub.specific.ownership import OwnershipPatchHelper
from datahub.utilities.urns.tag_urn import TagUrn
from datahub.utilities.urns.urn import Urn
T = TypeVar("T", bound=MetadataPatchProposal)
class FieldPatchHelper(Generic[T]):
    """Scopes tag/term patch operations to a single schema field.

    Obtained via the parent patch builder; every mutation is recorded on the
    parent and ``parent()`` returns to it for call chaining.
    """

    def __init__(
        self,
        parent: T,
        field_path: str,
        editable: bool = True,
    ) -> None:
        # `editable` selects the user-editable schema aspect vs the
        # ingestion-owned one; it drives both the aspect name and the
        # JSON-patch path prefix used by the methods below.
        self._parent: T = parent
        self.field_path = field_path
        self.aspect_name = (
            EditableSchemaMetadata.ASPECT_NAME
            if editable
            else SchemaMetadata.ASPECT_NAME
        )
        self.aspect_field = "editableSchemaFieldInfo" if editable else "schemaFieldInfo"

    def add_tag(self, tag: Tag) -> "FieldPatchHelper":
        """Record a patch attaching `tag` to this field."""
        self._parent._add_patch(
            self.aspect_name,
            "add",
            path=f"/{self.aspect_field}/{self.field_path}/globalTags/tags/{tag.tag}",
            value=tag,
        )
        return self

    def remove_tag(self, tag: Union[str, Urn]) -> "FieldPatchHelper":
        """Record a patch removing a tag; bare ids are expanded to tag urns."""
        if isinstance(tag, str) and not tag.startswith("urn:li:tag:"):
            tag = TagUrn.create_from_id(tag)
        self._parent._add_patch(
            self.aspect_name,
            "remove",
            path=f"/{self.aspect_field}/{self.field_path}/globalTags/tags/{tag}",
            value={},
        )
        return self

    def add_term(self, term: Term) -> "FieldPatchHelper":
        """Record a patch attaching the glossary `term` to this field."""
        self._parent._add_patch(
            self.aspect_name,
            "add",
            path=f"/{self.aspect_field}/{self.field_path}/glossaryTerms/terms/{term.urn}",
            value=term,
        )
        return self

    def remove_term(self, term: Union[str, Urn]) -> "FieldPatchHelper":
        """Record a patch removing a term; bare ids are expanded to term urns."""
        if isinstance(term, str) and not term.startswith("urn:li:glossaryTerm:"):
            term = "urn:li:glossaryTerm:" + term
        self._parent._add_patch(
            self.aspect_name,
            "remove",
            path=f"/{self.aspect_field}/{self.field_path}/glossaryTerms/terms/{term}",
            value={},
        )
        return self

    def parent(self) -> T:
        """Return the parent patch builder to continue the fluent chain."""
        return self._parent
class DatasetPatchBuilder(MetadataPatchProposal):
    """Fluent builder for partial (JSON-patch style) updates to a dataset.

    Every ``add_*``/``remove_*``/``set_*`` method records a patch operation
    and returns ``self`` so calls can be chained.
    """

    def __init__(
        self,
        urn: str,
        system_metadata: Optional[SystemMetadataClass] = None,
        audit_header: Optional[KafkaAuditHeaderClass] = None,
    ) -> None:
        super().__init__(
            urn, "dataset", system_metadata=system_metadata, audit_header=audit_header
        )
        # Helpers that translate property/ownership edits into patch operations
        self.custom_properties_patch_helper = CustomPropertiesPatchHelper(
            self, DatasetProperties.ASPECT_NAME
        )
        self.ownership_patch_helper = OwnershipPatchHelper(self)

    def METHOD_NAME(self, owner: Owner) -> "DatasetPatchBuilder":
        """Record a patch adding `owner` to the ownership aspect."""
        self.ownership_patch_helper.METHOD_NAME(owner)
        return self

    def remove_owner(
        self, owner: str, owner_type: Optional[OwnershipTypeClass] = None
    ) -> "DatasetPatchBuilder":
        """
        param: owner_type is optional
        """
        self.ownership_patch_helper.remove_owner(owner, owner_type)
        return self

    def set_owners(self, owners: List[Owner]) -> "DatasetPatchBuilder":
        """Record a patch replacing the full owner list."""
        self.ownership_patch_helper.set_owners(owners)
        return self

    def add_upstream_lineage(self, upstream: Upstream) -> "DatasetPatchBuilder":
        """Record a patch adding one upstream dataset edge."""
        self._add_patch(
            UpstreamLineage.ASPECT_NAME,
            "add",
            # The urn is percent-encoded so it is safe inside the patch path
            path=f"/upstreams/{quote(upstream.dataset, safe='')}",
            value=upstream,
        )
        return self

    def remove_upstream_lineage(
        self, dataset: Union[str, Urn]
    ) -> "DatasetPatchBuilder":
        """Record a patch removing the upstream edge to `dataset`."""
        # NOTE(review): unlike add_upstream_lineage, the urn is NOT
        # percent-encoded here — confirm whether removal paths need quote()
        # too for urns containing reserved characters.
        self._add_patch(
            UpstreamLineage.ASPECT_NAME,
            "remove",
            path=f"/upstreams/{dataset}",
            value={},
        )
        return self

    def set_upstream_lineages(self, upstreams: List[Upstream]) -> "DatasetPatchBuilder":
        """Record a patch replacing the full upstream list."""
        self._add_patch(
            UpstreamLineage.ASPECT_NAME, "replace", path="/upstreams", value=upstreams
        )
        return self

    def add_tag(self, tag: Tag) -> "DatasetPatchBuilder":
        """Record a patch adding `tag` at the dataset level."""
        self._add_patch(
            GlobalTags.ASPECT_NAME, "add", path=f"/tags/{tag.tag}", value=tag
        )
        return self

    def remove_tag(self, tag: Union[str, Urn]) -> "DatasetPatchBuilder":
        """Record a patch removing a tag; bare ids are expanded to tag urns."""
        if isinstance(tag, str) and not tag.startswith("urn:li:tag:"):
            tag = TagUrn.create_from_id(tag)
        self._add_patch(GlobalTags.ASPECT_NAME, "remove", path=f"/tags/{tag}", value={})
        return self

    def add_term(self, term: Term) -> "DatasetPatchBuilder":
        """Record a patch adding the glossary `term` at the dataset level."""
        self._add_patch(
            GlossaryTerms.ASPECT_NAME, "add", path=f"/terms/{term.urn}", value=term
        )
        return self

    def remove_term(self, term: Union[str, Urn]) -> "DatasetPatchBuilder":
        """Record a patch removing a term; bare ids are expanded to term urns."""
        if isinstance(term, str) and not term.startswith("urn:li:glossaryTerm:"):
            term = "urn:li:glossaryTerm:" + term
        self._add_patch(
            GlossaryTerms.ASPECT_NAME, "remove", path=f"/terms/{term}", value={}
        )
        return self

    def for_field(
        self, field_path: str, editable: bool = True
    ) -> FieldPatchHelper["DatasetPatchBuilder"]:
        """
        Get a helper that can perform patches against fields in the dataset
        :param field_path: The field path in datahub format
        :param editable: Whether patches should apply to the editable section of schema metadata or not
        """
        return FieldPatchHelper(
            self,
            field_path,
            editable=editable,
        )

    def set_description(
        self, description: str, editable: bool = False
    ) -> "DatasetPatchBuilder":
        """Record a patch replacing the (editable or base) description."""
        self._add_patch(
            DatasetProperties.ASPECT_NAME
            if not editable
            else EditableDatasetProperties.ASPECT_NAME,
            "replace",
            path="/description",
            value=description,
        )
        return self

    def set_custom_properties(
        self, custom_properties: Dict[str, str]
    ) -> "DatasetPatchBuilder":
        """Record a patch replacing ALL custom properties at once."""
        self._add_patch(
            DatasetProperties.ASPECT_NAME,
            "replace",
            path="/customProperties",
            value=custom_properties,
        )
        return self

    def add_custom_property(self, key: str, value: str) -> "DatasetPatchBuilder":
        """Record a patch adding/updating a single custom property."""
        self.custom_properties_patch_helper.add_property(key, value)
        return self

    def remove_custom_property(self, key: str) -> "DatasetPatchBuilder":
        """Record a patch removing a single custom property."""
        self.custom_properties_patch_helper.remove_property(key)
        return self

    def set_display_name(self, display_name: str) -> "DatasetPatchBuilder":
        """Record a patch replacing the display name (no-op when None)."""
        if display_name is not None:
            self._add_patch(
                DatasetProperties.ASPECT_NAME,
                "replace",
                path="/name",
                value=display_name,
            )
        return self
298,523 | write | #!/usr/bin/env python
# coding=utf-8
import os
import sys
import subprocess
import warnings
from io import StringIO
from contextlib import contextmanager
import wrapt
from sacred.optional import libc
from tempfile import NamedTemporaryFile
from sacred.settings import SETTINGS
def flush():
    """Try to flush all stdio buffers, both from python and from C."""
    try:
        sys.stdout.flush()
        sys.stderr.flush()
    except (AttributeError, ValueError, OSError):
        pass  # unsupported
    try:
        # fflush(NULL) flushes all open C output streams; needed when C
        # extensions write directly to stdio
        libc.fflush(None)
    except (AttributeError, ValueError, OSError):
        pass  # unsupported
def get_stdcapturer(mode=None):
    """Return (mode, capture context-manager factory) for the given mode.

    Falls back to SETTINGS.CAPTURE_MODE when `mode` is None; raises KeyError
    for unknown modes.
    """
    if mode is None:
        mode = SETTINGS.CAPTURE_MODE
    capture_options = {"no": no_tee, "fd": tee_output_fd, "sys": tee_output_python}
    if mode not in capture_options:
        raise KeyError(
            "Unknown capture mode '{}'. Available options are {}".format(
                mode, sorted(capture_options.keys())
            )
        )
    return mode, capture_options[mode]
class TeeingStreamProxy(wrapt.ObjectProxy):
    """A wrapper around stdout or stderr that duplicates all output to out."""

    def __init__(self, wrapped, out):
        super().__init__(wrapped)
        # wrapt requires proxy-local attributes to use the _self_ prefix
        self._self_out = out

    def METHOD_NAME(self, data):
        # Forward to the real stream, then mirror into the capture buffer
        self.__wrapped__.METHOD_NAME(data)
        self._self_out.METHOD_NAME(data)

    def flush(self):
        self.__wrapped__.flush()
        self._self_out.flush()
class CapturedStdout:
    """Read-tracking wrapper around a capture buffer.

    ``get()`` returns only the output added since the previous ``get()``.
    ``finalize()`` drains the remaining output, closes the buffer, and keeps
    that last chunk available for one more ``get()`` call.
    """

    def __init__(self, buffer):
        self.buffer = buffer
        self.read_position = 0  # offset of the first not-yet-returned char
        self.final = None       # set by finalize(); holds the last chunk

    @property
    def closed(self):
        """Whether the underlying buffer has been closed."""
        return self.buffer.closed

    def flush(self):
        return self.buffer.flush()

    def get(self):
        """Return the output produced since the last call to get()."""
        if self.final is not None:
            remaining, self.final = self.final, None
            return remaining
        self.buffer.seek(self.read_position)
        fresh = self.buffer.read()
        self.read_position = self.buffer.tell()
        return fresh

    def finalize(self):
        """Flush, capture any remaining output, and close the buffer."""
        self.flush()
        self.final = self.get()
        self.buffer.close()
@contextmanager
def no_tee():
    # "no" capture mode: nothing is duplicated from the real streams; the
    # caller gets an (empty) CapturedStdout purely for API uniformity.
    out = CapturedStdout(StringIO())
    try:
        yield out
    finally:
        out.finalize()
@contextmanager
def tee_output_python():
    """Duplicate sys.stdout and sys.stderr to new StringIO."""
    buffer = StringIO()
    out = CapturedStdout(buffer)
    orig_stdout, orig_stderr = sys.stdout, sys.stderr
    flush()
    # Wrap the current streams so output still reaches the console while a
    # copy accumulates in `buffer`
    sys.stdout = TeeingStreamProxy(sys.stdout, buffer)
    sys.stderr = TeeingStreamProxy(sys.stderr, buffer)
    try:
        yield out
    finally:
        flush()
        out.finalize()
        # Restore the original (unwrapped) streams
        sys.stdout, sys.stderr = orig_stdout, orig_stderr
# Duplicate stdout and stderr to a file. Inspired by:
# http://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
# http://stackoverflow.com/a/651718/1388435
# http://stackoverflow.com/a/22434262/1388435
@contextmanager
def tee_output_fd():
    """Duplicate stdout and stderr to a file on the file descriptor level.

    Captures output that bypasses the Python-level sys.stdout/sys.stderr
    (e.g. from C extensions or subprocesses) by redirecting fds 1 and 2
    through a `tee` child process into a temp file.
    """
    with NamedTemporaryFile(mode="w+", newline="") as target:
        original_stdout_fd = 1
        original_stderr_fd = 2
        target_fd = target.fileno()

        # Save a copy of the original stdout and stderr file descriptors)
        saved_stdout_fd = os.dup(original_stdout_fd)
        saved_stderr_fd = os.dup(original_stderr_fd)
        try:
            # start_new_session=True to move process to a new process group
            # this is done to avoid receiving KeyboardInterrupts (see #149)
            tee_stdout = subprocess.Popen(
                ["tee", "-a", target.name],
                start_new_session=True,
                stdin=subprocess.PIPE,
                stdout=1,
            )
            tee_stderr = subprocess.Popen(
                ["tee", "-a", target.name],
                start_new_session=True,
                stdin=subprocess.PIPE,
                stdout=2,
            )
        except (FileNotFoundError, OSError, AttributeError):
            # No tee found in this operating system. Trying to use a python
            # implementation of tee. However this is slow and error-prone.
            # NOTE(review): the stdout tee redirects *stderr* to the temp file
            # while the stderr tee redirects *stdout* — confirm this matches
            # sacred.pytee's stream conventions.
            tee_stdout = subprocess.Popen(
                [sys.executable, "-m", "sacred.pytee"],
                stdin=subprocess.PIPE,
                stderr=target_fd,
            )
            tee_stderr = subprocess.Popen(
                [sys.executable, "-m", "sacred.pytee"],
                stdin=subprocess.PIPE,
                stdout=target_fd,
            )

        flush()
        # Point fds 1/2 at the tee processes' stdin pipes
        os.dup2(tee_stdout.stdin.fileno(), original_stdout_fd)
        os.dup2(tee_stderr.stdin.fileno(), original_stderr_fd)
        out = CapturedStdout(target)

        try:
            yield out  # let the caller do their printing
        finally:
            flush()

            # then redirect stdout back to the saved fd
            tee_stdout.stdin.close()
            tee_stderr.stdin.close()

            # restore original fds
            os.dup2(saved_stdout_fd, original_stdout_fd)
            os.dup2(saved_stderr_fd, original_stderr_fd)

            # Give the tee children a moment to drain, then force-terminate
            try:
                tee_stdout.wait(timeout=1)
            except subprocess.TimeoutExpired:
                warnings.warn("tee_stdout.wait timeout. Forcibly terminating.")
                tee_stdout.terminate()
            try:
                tee_stderr.wait(timeout=1)
            except subprocess.TimeoutExpired:
                warnings.warn("tee_stderr.wait timeout. Forcibly terminating.")
                tee_stderr.terminate()

            os.close(saved_stdout_fd)
            os.close(saved_stderr_fd)
            out.finalize()
298,524 | miou metric | # type: ignore
from labelbox.data.annotation_types.metrics.scalar import ScalarMetric
from typing import List, Optional, Union
from ...annotation_types import (Label, ObjectAnnotation,
ClassificationAnnotation)
from ..group import get_feature_pairs
from .calculation import feature_miou
from .calculation import miou
def METHOD_NAME(ground_truths: List[Union[ObjectAnnotation,
                                          ClassificationAnnotation]],
                predictions: List[Union[ObjectAnnotation,
                                        ClassificationAnnotation]],
                include_subclasses=False) -> List[ScalarMetric]:
    """
    Computes miou between two sets of annotations.
    These annotations should relate to the same data (image/video).
    Each class in the annotation list is weighted equally in the iou score.

    Args:
        ground_truths: human annotations, or annotations known to be correct
        predictions: model predictions for the same data
        include_subclasses (bool): when True, the iou between two overlapping
            objects of the same type is 0 if their subclasses differ.
    Returns:
        A list with a single ScalarMetric, or an empty list when there were
        neither predictions nor labels.
    """
    iou_value = miou(ground_truths, predictions, include_subclasses)
    if iou_value is None:
        # Both ground truth and predictions empty: nothing to score
        return []
    return [ScalarMetric(metric_name="custom_iou", value=iou_value)]
def feature_miou_metric(ground_truths: List[Union[ObjectAnnotation,
                                                  ClassificationAnnotation]],
                        predictions: List[Union[ObjectAnnotation,
                                                ClassificationAnnotation]],
                        include_subclasses=True) -> List[ScalarMetric]:
    """
    Computes the miou for each class present in the annotations.
    These annotations should relate to the same data (image/video).

    Args:
        ground_truths: human annotations, or annotations known to be correct
        predictions: model predictions for the same data
        include_subclasses (bool): when True, the iou between two overlapping
            objects of the same type is 0 if their subclasses differ.
    Returns:
        One ScalarMetric per class in the union of ground-truth and
        prediction classes (classes with no computable iou are skipped).
    """
    # Classifications are supported because we just take a naive approach to them..
    annotation_pairs = get_feature_pairs(predictions, ground_truths)
    scores = {
        feature_name: feature_miou(preds, gts, include_subclasses)
        for feature_name, (preds, gts) in annotation_pairs.items()
    }
    return [
        ScalarMetric(metric_name="custom_iou",
                     feature_name=feature_name,
                     value=value)
        for feature_name, value in scores.items()
        if value is not None
    ]
def data_row_miou(ground_truth: Label,
                  prediction: Label,
                  include_subclasses=False) -> Optional[float]:
    """
    This function is no longer supported. Use miou() for raw values or miou_metric() for the metric

    Calculates iou for two labels corresponding to the same data row.

    Args:
        ground_truth : Label containing human annotations or annotations known to be correct
        prediction: Label representing model predictions
    Returns:
        float indicating the iou score for this data row.
        Returns None if there are no annotations in ground_truth or prediction Labels
    """
    # Kept as a thin deprecated wrapper around miou() for backward compatibility
    return miou(ground_truth.annotations, prediction.annotations,
                include_subclasses)
298,525 | is valid | # Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
import csv
import json
import os
import random
import sys
from itertools import product
# Fixed seed: regenerating the test file must produce identical ids/diffs
random.seed(42)
NAME = "events"
def read_rules(name):
    """Load and flatten permission rules from `<sys.argv[1]>/<name>.csv`.

    Cell values are lower-cased and "n/a" becomes "na". A comma-separated
    cell in any column other than method/url/resource is expanded into one
    rule per item.
    """
    rules = []
    with open(os.path.join(sys.argv[1], f"{name}.csv")) as f:
        reader = csv.DictReader(f)
        for row in reader:
            row = {k.lower(): v.lower().replace("n/a", "na") for k, v in row.items()}
            found = False
            # NOTE(review): when two columns both hold comma lists, variants
            # are appended per column independently (no cross product) —
            # confirm this is the intended expansion.
            for col, val in row.items():
                if col in ["method", "url", "resource"]:
                    continue
                complex_val = [v.strip() for v in val.split(",")]
                if len(complex_val) > 1:
                    found = True
                    for item in complex_val:
                        new_row = row.copy()
                        new_row[col] = item
                        rules.append(new_row)
            if not found:
                rules.append(row)
    return rules
# Flattened permission rules loaded from the spec directory (sys.argv[1])
simple_rules = read_rules(NAME)

# Axes over which test cases are generated
SCOPES = list({rule["scope"] for rule in simple_rules})
CONTEXTS = ["sandbox", "organization"]
OWNERSHIPS = ["none"]
# Ordered from most to least privileged; eval_rule relies on index order
GROUPS = ["admin", "business", "user", "worker", "none"]
ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None]
SAME_ORG = [True, False]
def RESOURCES(scope):
    """Return the resource payloads to test for `scope` (events need none)."""
    return [None]
def is_same_org(org1, org2):
    """True when both orgs share an id, or both are absent (sandbox case)."""
    if org1 is None or org2 is None:
        # Only "both missing" counts as the same (sandbox) org
        return org1 is org2
    return org1["id"] == org2["id"]
def eval_rule(scope, context, ownership, privilege, membership, data):
    """Return the expected allow/deny verdict for one generated test case.

    Admins are always allowed; otherwise at least one CSV rule must match
    scope/context/ownership and require a privilege/org-role level that the
    user meets. (`data` is accepted for signature parity but is unused.)
    """
    if privilege == "admin":
        return True
    rules = list(filter(lambda r: scope == r["scope"], simple_rules))
    rules = list(filter(lambda r: r["context"] == "na" or context == r["context"], rules))
    rules = list(filter(lambda r: r["ownership"] == "na" or ownership == r["ownership"], rules))
    # Lower index in ORG_ROLES/GROUPS means more privileged, so <= keeps the
    # rules whose required level is satisfied by the user's actual level
    rules = list(
        filter(
            lambda r: r["membership"] == "na"
            or ORG_ROLES.index(membership) <= ORG_ROLES.index(r["membership"]),
            rules,
        )
    )
    rules = list(filter(lambda r: GROUPS.index(privilege) <= GROUPS.index(r["privilege"]), rules))
    return bool(rules)
def get_data(scope, context, ownership, privilege, membership, resource, same_org):
    """Build the OPA input document for one generated test case.

    Ids are drawn from disjoint random ranges so user/org/owner ids never
    collide; when the acting user is the org owner, the owner id is aligned
    with the user id afterwards.
    """
    # Note: random calls happen in a fixed order (user, org, owner) so the
    # globally seeded RNG yields reproducible ids
    user = {"id": random.randrange(0, 100), "privilege": privilege}
    organization = None
    if context == "organization":
        organization = {
            "id": random.randrange(100, 200),
            "owner": {"id": random.randrange(200, 300)},
            "user": {"role": membership},
        }
        if organization["user"]["role"] == "owner":
            organization["owner"]["id"] = user["id"]
    return {
        "scope": scope,
        "auth": {"user": user, "organization": organization},
        "resource": resource,
    }
def _get_name(prefix, **kwargs):
name = prefix
for k, v in kwargs.items():
prefix = "_" + str(k)
if isinstance(v, dict):
if "id" in v:
v = v.copy()
v.pop("id")
if v:
name += _get_name(prefix, **v)
else:
name += "".join(
map(
lambda c: c if c.isalnum() else {"@": "_IN_"}.get(c, "_"),
f"{prefix}_{str(v).upper()}",
)
)
return name
def get_name(scope, context, ownership, privilege, membership, resource, same_org):
    # locals() captures all parameters, so the generated test name encodes
    # every axis of the case
    return _get_name("test", **locals())
def METHOD_NAME(scope, context, ownership, privilege, membership, resource, same_org):
    """Filter out generated parameter combinations that make no sense."""
    # Sandbox has no organization, hence no membership role
    if context == "sandbox" and membership:
        return False
    # NOTE(review): ownership values are lower-cased ("none" in OWNERSHIPS),
    # so `ownership != "None"` is always True here and every "list" scope
    # case is rejected — confirm whether the literal should be "none".
    if scope == "list" and ownership != "None":
        return False
    # same_org only varies meaningfully inside an organization context
    if context == "sandbox" and same_org is False:
        return False
    return True
def gen_test_rego(name):
    """Generate `<name>_test.gen.rego` with one OPA test per valid case.

    The generator script itself and the source CSV are appended (commented
    out) so the output file is self-describing and reproducible.
    """
    with open(f"{name}_test.gen.rego", "wt") as f:
        f.write(f"package {name}\n\n")
        print("scopes", SCOPES)
        # Cartesian product over every test axis, filtered by METHOD_NAME
        for scope, context, ownership, privilege, membership, same_org in product(
            SCOPES, CONTEXTS, OWNERSHIPS, GROUPS, ORG_ROLES, SAME_ORG
        ):
            for resource in RESOURCES(scope):
                if not METHOD_NAME(
                    scope, context, ownership, privilege, membership, resource, same_org
                ):
                    continue
                data = get_data(
                    scope, context, ownership, privilege, membership, resource, same_org
                )
                test_name = get_name(
                    scope, context, ownership, privilege, membership, resource, same_org
                )
                result = eval_rule(scope, context, ownership, privilege, membership, data)
                f.write(
                    "{test_name} {{\n {allow} with input as {data}\n}}\n\n".format(
                        test_name=test_name,
                        allow="allow" if result else "not allow",
                        data=json.dumps(data),
                    )
                )
        # Write the script which is used to generate the file
        with open(sys.argv[0]) as this_file:
            f.write(f"\n\n# {os.path.split(sys.argv[0])[1]}\n")
            for line in this_file:
                if line.strip():
                    f.write(f"# {line}")
                else:
                    f.write(f"#\n")
        # Write rules which are used to generate the file
        with open(os.path.join(sys.argv[1], f"{name}.csv")) as rego_file:
            f.write(f"\n\n# {name}.csv\n")
            for line in rego_file:
                if line.strip():
                    f.write(f"# {line}")
                else:
                    f.write(f"#\n")
# Generate the rego test file for the "events" bundle on script execution
gen_test_rego(NAME)
298,526 | extract data | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
# Optional callback applied to each raw pipeline response before results are
# returned to the caller: cls(pipeline_response, deserialized, response_headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Module-wide serializer for query/header values; client-side validation is
# disabled because the generated client relies on server-side validation.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
    """Assemble the GET request that lists the DigitalTwins REST API operations."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2022-10-31"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-31"))
    accept = _headers.pop("Accept", "application/json")
    _url = kwargs.pop("template_url", "/providers/Microsoft.DigitalTwins/operations")

    # Serialize the query parameter and the Accept header.
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.digitaltwins.v2022_10_31.AzureDigitalTwinsManagementClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Wired up by the generated client: (client, config, serializer,
        # deserializer) may arrive positionally or as keywords.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
        """Lists all of the available DigitalTwins service REST API operations.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Operation or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.digitaltwins.v2022_10_31.models.Operation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2022-10-31"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-31"))
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)

        # Map well-known error status codes to typed exceptions; callers may
        # extend or override via the error_map keyword.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the canonical list request; subsequent pages:
            # rebuild the request from the service-provided next_link.
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def METHOD_NAME(pipeline_response):
            # Deserialize one page: returns (next_link or None, iterator of items).
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Execute one page request through the client's pipeline and map
            # non-200 responses to typed HttpResponseError.
            request = prepare_request(next_link)

            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, METHOD_NAME)
list.metadata = {"url": "/providers/Microsoft.DigitalTwins/operations"} |
298,527 | href | import jinja2
from dateutil import parser
from h.presenters.document_html import DocumentHTMLPresenter
class AnnotationHTMLPresenter:
    """Wraps Annotation model objects and adds some HTML properties."""

    def __init__(self, annotation):
        self.annotation = annotation
        document = annotation.document
        self.document = DocumentHTMLPresenter(document) if document else None

    @property
    def uri(self):
        return jinja2.escape(self.annotation.target_uri)

    @property
    def text_rendered(self):
        """
        Get the body text of this annotation.

        This return value of this field is marked safe because it is rendered
        to HTML on write by :py:func:`h.util.markdown.render`, which must take
        care of all necessary escaping.
        """
        return jinja2.Markup(self.annotation.text_rendered or "")

    @property
    def quote(self):
        """Get the text in the document which this annotation refers to."""
        selection = self._get_selection()
        return jinja2.escape(selection) if selection else ""

    @property
    def description(self):
        """
        Get an HTML-formatted description of this annotation.

        The description contains the target text that the user selected to
        annotate, as a <blockquote>, and the body text of the annotation
        itself.
        """
        description = ""

        selection = self._get_selection()
        if selection:
            escaped_selection = jinja2.escape(selection)
            description += f"<blockquote>{escaped_selection}</blockquote>"

        body = self.annotation.text
        if body:
            description += f"{jinja2.escape(body)}"

        return description

    @property
    def created_day_string(self):
        """
        Get a simple created day string for this annotation.

        Returns a day string like '2015-03-11' from the annotation's 'created'
        date.
        """
        created = jinja2.escape(self.annotation.created)
        return parser.parse(created).strftime("%Y-%m-%d")

    @property
    def document_link(self):
        """Return a link to this annotation's document."""
        return self.document.link if self.document else ""

    @property
    def filename(self):
        """Return the filename of this annotation's document."""
        return self.document.filename if self.document else ""

    @property
    def hostname_or_filename(self):
        """Return the hostname of this annotation's document."""
        return self.document.hostname_or_filename if self.document else ""

    @property
    def METHOD_NAME(self):
        """Return an href for this annotation's document, or ''."""
        return self.document.METHOD_NAME if self.document else ""

    @property
    def link_text(self):
        """Return some link text for this annotation's document."""
        return self.document.link_text if self.document else ""

    @property
    def title(self):
        """Return a title for this annotation."""
        return self.document.title if self.document else ""

    # Explicitly forward some annotation properties for convenient access.

    @property
    def id(self):
        return self.annotation.id

    @property
    def created(self):
        return self.annotation.created

    @property
    def updated(self):
        return self.annotation.updated

    @property
    def userid(self):
        return self.annotation.userid

    @property
    def username(self):
        userid_parts = self.annotation.userid.split(":")
        return userid_parts[1].split("@")[0]

    @property
    def shared(self):
        return self.annotation.shared

    @property
    def tags(self):
        return self.annotation.tags

    def _get_selection(self):
        """Return the first selector's 'exact' text, or None if absent."""
        return next(
            (
                selector["exact"]
                for selector in self.annotation.target_selectors
                if "exact" in selector
            ),
            None,
        )
298,528 | test serialize kb disk | from pathlib import Path
from typing import Any, Callable, Dict, Iterable
import srsly
from numpy import zeros
from thinc.api import Config
from spacy import Errors, util
from spacy.kb.kb_in_memory import InMemoryLookupKB
from spacy.util import SimpleFrozenList, ensure_path, load_model_from_config, registry
from spacy.vocab import Vocab
from ..util import make_tempdir
def METHOD_NAME(en_vocab):
    """Round-trip an InMemoryLookupKB through to_disk/from_disk and re-verify it."""
    # sanity-check the fixture before serializing
    original_kb = _get_dummy_kb(en_vocab)
    _check_kb(original_kb)
    # serialize to disk, then load into a fresh KB instance
    with make_tempdir() as tmp:
        kb_dir = ensure_path(tmp)
        if not kb_dir.exists():
            kb_dir.mkdir()
        kb_path = kb_dir / "kb"
        original_kb.to_disk(str(kb_path))
        restored_kb = InMemoryLookupKB(vocab=en_vocab, entity_vector_length=3)
        restored_kb.from_disk(str(kb_path))
    # the restored KB must pass the same checks
    _check_kb(restored_kb)
def _get_dummy_kb(vocab):
    """Build a small fixture KB with four entities and three aliases."""
    kb = InMemoryLookupKB(vocab, entity_vector_length=3)
    for name, freq, vector in (
        ("Q53", 33, [0, 5, 3]),
        ("Q17", 2, [7, 1, 0]),
        ("Q007", 7, [0, 0, 7]),
        ("Q44", 342, [4, 4, 4]),
    ):
        kb.add_entity(entity=name, freq=freq, entity_vector=vector)
    kb.add_alias(alias="double07", entities=["Q17", "Q007"], probabilities=[0.1, 0.9])
    kb.add_alias(
        alias="guy",
        entities=["Q53", "Q007", "Q17", "Q44"],
        probabilities=[0.3, 0.3, 0.2, 0.1],
    )
    kb.add_alias(alias="random", entities=["Q007"], probabilities=[1.0])
    return kb
def _check_kb(kb):
    """Assert that a KB's contents match the fixture built by _get_dummy_kb."""
    # entities
    assert kb.get_size_entities() == 4
    for expected in ("Q53", "Q17", "Q007", "Q44"):
        assert expected in kb.get_entity_strings()
    for unexpected in ("", "Q0"):
        assert unexpected not in kb.get_entity_strings()
    # aliases
    assert kb.get_size_aliases() == 3
    for expected in ("double07", "guy", "random"):
        assert expected in kb.get_alias_strings()
    for unexpected in ("nothingness", "", "randomnoise"):
        assert unexpected not in kb.get_alias_strings()
    # candidates & prior probabilities for the "double07" alias
    candidates = sorted(kb.get_alias_candidates("double07"), key=lambda cand: cand.entity_)
    assert len(candidates) == 2
    first, second = candidates
    assert first.entity_ == "Q007"
    assert 6.999 < first.entity_freq < 7.01
    assert first.entity_vector == [0, 0, 7]
    assert first.alias_ == "double07"
    assert 0.899 < first.prior_prob < 0.901
    assert second.entity_ == "Q17"
    assert 1.99 < second.entity_freq < 2.01
    assert second.entity_vector == [7, 1, 0]
    assert second.alias_ == "double07"
    assert 0.099 < second.prior_prob < 0.101
def test_serialize_subclassed_kb():
    """Check that IO of a custom KB works fine as part of an EL pipe."""
    # Pipeline config: the entity_linker uses a custom empty-KB factory and a
    # custom kb_loader, both registered below under @registry.misc.
    config_string = """
    [nlp]
    lang = "en"
    pipeline = ["entity_linker"]
    [components]
    [components.entity_linker]
    factory = "entity_linker"
    [components.entity_linker.generate_empty_kb]
    @misc = "kb_test.CustomEmptyKB.v1"
    [initialize]
    [initialize.components]
    [initialize.components.entity_linker]
    [initialize.components.entity_linker.kb_loader]
    @misc = "kb_test.CustomKB.v1"
    entity_vector_length = 342
    custom_field = 666
    """

    # KB subclass carrying one extra attribute (custom_field) that must
    # survive serialization, hence the to_disk/from_disk overrides.
    class SubInMemoryLookupKB(InMemoryLookupKB):
        def __init__(self, vocab, entity_vector_length, custom_field):
            super().__init__(vocab, entity_vector_length)
            self.custom_field = custom_field

        def to_disk(self, path, exclude: Iterable[str] = SimpleFrozenList()):
            """We overwrite InMemoryLookupKB.to_disk() to ensure that self.custom_field is stored as well."""
            path = ensure_path(path)
            if not path.exists():
                path.mkdir(parents=True)
            if not path.is_dir():
                raise ValueError(Errors.E928.format(loc=path))

            def serialize_custom_fields(file_path: Path) -> None:
                srsly.write_json(file_path, {"custom_field": self.custom_field})

            serialize = {
                "contents": lambda p: self.write_contents(p),
                "strings.json": lambda p: self.vocab.strings.to_disk(p),
                "custom_fields": lambda p: serialize_custom_fields(p),
            }
            util.to_disk(path, serialize, exclude)

        def from_disk(self, path, exclude: Iterable[str] = SimpleFrozenList()):
            """We overwrite InMemoryLookupKB.from_disk() to ensure that self.custom_field is loaded as well."""
            path = ensure_path(path)
            if not path.exists():
                raise ValueError(Errors.E929.format(loc=path))
            if not path.is_dir():
                raise ValueError(Errors.E928.format(loc=path))

            def deserialize_custom_fields(file_path: Path) -> None:
                self.custom_field = srsly.read_json(file_path)["custom_field"]

            deserialize: Dict[str, Callable[[Any], Any]] = {
                "contents": lambda p: self.read_contents(p),
                "strings.json": lambda p: self.vocab.strings.from_disk(p),
                "custom_fields": lambda p: deserialize_custom_fields(p),
            }
            util.from_disk(path, deserialize, exclude)

    @registry.misc("kb_test.CustomEmptyKB.v1")
    def empty_custom_kb() -> Callable[[Vocab, int], SubInMemoryLookupKB]:
        # Factory used by the pipeline component to create an empty subclass KB.
        def empty_kb_factory(vocab: Vocab, entity_vector_length: int):
            return SubInMemoryLookupKB(
                vocab=vocab,
                entity_vector_length=entity_vector_length,
                custom_field=0,
            )

        return empty_kb_factory

    @registry.misc("kb_test.CustomKB.v1")
    def custom_kb(
        entity_vector_length: int, custom_field: int
    ) -> Callable[[Vocab], SubInMemoryLookupKB]:
        # Loader invoked at nlp.initialize() time to build a populated KB.
        def custom_kb_factory(vocab):
            kb = SubInMemoryLookupKB(
                vocab=vocab,
                entity_vector_length=entity_vector_length,
                custom_field=custom_field,
            )
            kb.add_entity("random_entity", 0.0, zeros(entity_vector_length))
            return kb

        return custom_kb_factory

    config = Config().from_str(config_string)
    nlp = load_model_from_config(config, auto_fill=True)
    nlp.initialize()
    entity_linker = nlp.get_pipe("entity_linker")
    assert type(entity_linker.kb) == SubInMemoryLookupKB
    assert entity_linker.kb.entity_vector_length == 342
    assert entity_linker.kb.custom_field == 666

    # Make sure the custom KB is serialized correctly
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        nlp2 = util.load_model_from_path(tmp_dir)
        entity_linker2 = nlp2.get_pipe("entity_linker")
        # After IO, the KB must still be the subclass and keep custom_field.
        assert type(entity_linker2.kb) == SubInMemoryLookupKB
        assert entity_linker2.kb.entity_vector_length == 342
        assert entity_linker2.kb.custom_field == 666
298,529 | petsc to psydac | # coding: utf-8
import numpy as np
from math import sqrt
from psydac.linalg.stencil import StencilVectorSpace, StencilVector
from psydac.linalg.block import BlockVector, BlockVectorSpace
__all__ = ('array_to_psydac', 'petsc_to_psydac', '_sym_ortho')
def array_to_psydac(x, Xh):
    """Convert a flat numpy array to a StencilVector or BlockVector in Xh.

    The coefficients in ``x`` are consumed block by block; each block is
    reshaped (C order) into the local index box defined by the space's
    ``starts``/``ends``.

    Parameters
    ----------
    x : numpy.ndarray
        1D array of coefficients, ordered block by block.
    Xh : StencilVectorSpace | BlockVectorSpace
        Target vector space; may be a two-level (nested) block space.

    Returns
    -------
    StencilVector | BlockVector
        Vector in Xh with ghost regions updated.

    Raises
    ------
    ValueError
        If Xh is neither a StencilVectorSpace nor a BlockVectorSpace.
    """
    # NOTE: np.prod is used throughout; the old np.product alias was
    # deprecated in NumPy 1.25 and removed in NumPy 2.0.
    if isinstance(Xh, BlockVectorSpace):
        u = BlockVector(Xh)
        if isinstance(Xh.spaces[0], BlockVectorSpace):
            # Two-level block space: loop over outer blocks, then components.
            for d in range(len(Xh.spaces)):
                starts = [np.array(V.starts) for V in Xh.spaces[d].spaces]
                ends = [np.array(V.ends) for V in Xh.spaces[d].spaces]
                for i in range(len(starts)):
                    g = tuple(slice(s, e + 1) for s, e in zip(starts[i], ends[i]))
                    shape = tuple(ends[i] - starts[i] + 1)
                    n = np.prod(shape)
                    u[d][i][g] = x[:n].reshape(shape)
                    x = x[n:]  # consume this block's coefficients
        else:
            # Single-level block space.
            starts = [np.array(V.starts) for V in Xh.spaces]
            ends = [np.array(V.ends) for V in Xh.spaces]
            for i in range(len(starts)):
                g = tuple(slice(s, e + 1) for s, e in zip(starts[i], ends[i]))
                shape = tuple(ends[i] - starts[i] + 1)
                n = np.prod(shape)
                u[i][g] = x[:n].reshape(shape)
                x = x[n:]
    elif isinstance(Xh, StencilVectorSpace):
        u = StencilVector(Xh)
        starts = np.array(Xh.starts)
        ends = np.array(Xh.ends)
        g = tuple(slice(s, e + 1) for s, e in zip(starts, ends))
        shape = tuple(ends - starts + 1)
        u[g] = x[:np.prod(shape)].reshape(shape)
    else:
        raise ValueError('Xh must be a StencilVectorSpace or a BlockVectorSpace')

    u.update_ghost_regions()
    return u
def METHOD_NAME(vec, Xh):
    """Convert a petsc Vec object to a StencilVector or a BlockVector.

    The petsc global vector is gathered on every process and the chunk owned
    by the Psydac vector is extracted from it.

    Parameters
    ----------
    vec : petsc4py.PETSc.Vec
        Distributed PETSc vector holding the global coefficients.
    Xh : StencilVectorSpace | BlockVectorSpace
        Target Psydac vector space; may be a two-level (nested) block space.

    Returns
    -------
    StencilVector | BlockVector
        Vector in Xh with ghost regions updated.

    .. warning: This function will not work if the global vector does not fit
        in the process memory.
    """
    # NOTE: np.prod replaces the np.product alias, which was deprecated in
    # NumPy 1.25 and removed in NumPy 2.0.
    if isinstance(Xh, BlockVectorSpace):
        u = BlockVector(Xh)
        if isinstance(Xh.spaces[0], BlockVectorSpace):
            comm = u[0][0].space.cart.global_comm
            dtype = u[0][0].space.dtype
            sendcounts = np.array(comm.allgather(len(vec.array)))
            recvbuf = np.empty(sum(sendcounts), dtype=dtype)
            # gather the global array in all the procs
            comm.Allgatherv(sendbuf=vec.array, recvbuf=(recvbuf, sendcounts))
            inds = 0  # offset of the current component in the global array
            for d in range(len(Xh.spaces)):
                starts = [np.array(V.starts) for V in Xh.spaces[d].spaces]
                ends = [np.array(V.ends) for V in Xh.spaces[d].spaces]
                for i in range(len(starts)):
                    # interior view of the local data array (skip pad/ghost cells)
                    idx = tuple(slice(m*p, -m*p) for m, p in zip(u.space.spaces[d].spaces[i].pads, u.space.spaces[d].spaces[i].shifts))
                    shape = tuple(ends[i]-starts[i]+1)
                    npts = Xh.spaces[d].spaces[i].npts
                    # compute the global indices of the coefficients owned by the process using starts and ends
                    indices = np.array([np.ravel_multi_index([s+x for s, x in zip(starts[i], xx)], dims=npts, order='C') for xx in np.ndindex(*shape)])
                    vals = recvbuf[indices+inds]
                    u[d][i]._data[idx] = vals.reshape(shape)
                    inds += np.prod(npts)
        else:
            comm = u[0].space.cart.global_comm
            dtype = u[0].space.dtype
            sendcounts = np.array(comm.allgather(len(vec.array)))
            recvbuf = np.empty(sum(sendcounts), dtype=dtype)
            # gather the global array in all the procs
            comm.Allgatherv(sendbuf=vec.array, recvbuf=(recvbuf, sendcounts))
            inds = 0
            starts = [np.array(V.starts) for V in Xh.spaces]
            ends = [np.array(V.ends) for V in Xh.spaces]
            for i in range(len(starts)):
                idx = tuple(slice(m*p, -m*p) for m, p in zip(u.space.spaces[i].pads, u.space.spaces[i].shifts))
                shape = tuple(ends[i]-starts[i]+1)
                npts = Xh.spaces[i].npts
                # compute the global indices of the coefficients owned by the process using starts and ends
                indices = np.array([np.ravel_multi_index([s+x for s, x in zip(starts[i], xx)], dims=npts, order='C') for xx in np.ndindex(*shape)])
                vals = recvbuf[indices+inds]
                u[i]._data[idx] = vals.reshape(shape)
                inds += np.prod(npts)
    elif isinstance(Xh, StencilVectorSpace):
        u = StencilVector(Xh)
        comm = u.space.cart.global_comm
        dtype = u.space.dtype
        sendcounts = np.array(comm.allgather(len(vec.array)))
        recvbuf = np.empty(sum(sendcounts), dtype=dtype)
        # gather the global array in all the procs
        comm.Allgatherv(sendbuf=vec.array, recvbuf=(recvbuf, sendcounts))
        # compute the global indices of the coefficients owned by the process using starts and ends
        starts = np.array(Xh.starts)
        ends = np.array(Xh.ends)
        shape = tuple(ends-starts+1)
        npts = Xh.npts
        indices = np.array([np.ravel_multi_index([s+x for s, x in zip(starts, xx)], dims=npts, order='C') for xx in np.ndindex(*shape)])
        idx = tuple(slice(m*p, -m*p) for m, p in zip(u.space.pads, u.space.shifts))
        vals = recvbuf[indices]
        u._data[idx] = vals.reshape(shape)
    else:
        raise ValueError('Xh must be a StencilVectorSpace or a BlockVectorSpace')

    u.update_ghost_regions()
    return u
def _sym_ortho(a, b):
"""
Stable implementation of Givens rotation.
This function was taken from the scipy repository
https://github.com/scipy/scipy/blob/master/scipy/sparse/linalg/isolve/lsqr.py
Notes
-----
The routine 'SymOrtho' was added for numerical stability. This is
recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of
``1/eps`` in some important places (see, for example text following
"Compute the next plane rotation Qk" in minres.py).
References
----------
.. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
and Least-Squares Problems", Dissertation,
http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf
"""
if b == 0:
return np.sign(a), 0, abs(a)
elif a == 0:
return 0, np.sign(b), abs(b)
elif abs(b) > abs(a):
tau = a / b
s = np.sign(b) / sqrt(1 + tau * tau)
c = s * tau
r = b / s
else:
tau = b / a
c = np.sign(a) / sqrt(1+tau*tau)
s = c * tau
r = a / c
return c, s, r |
298,530 | type | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSharedCommitmentPlanResult',
'AwaitableGetSharedCommitmentPlanResult',
'get_shared_commitment_plan',
'get_shared_commitment_plan_output',
]
@pulumi.output_type
class GetSharedCommitmentPlanResult:
    """
    Cognitive Services account commitment plan.
    """
    def __init__(__self__, etag=None, id=None, kind=None, location=None, name=None, properties=None, sku=None, system_data=None, tags=None, METHOD_NAME=None):
        # Engine-populated result object: validate each field's runtime type,
        # then store it with pulumi.set so @pulumi.output_type can expose it.
        # NOTE(review): METHOD_NAME appears to stand in for the 'type' field —
        # both the TypeError message and the pulumi.set key below use 'type'.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", METHOD_NAME)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        Resource Etag.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        The Kind of the resource.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.CommitmentPlanPropertiesResponse':
        """
        Properties of Cognitive Services account commitment plan.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """
        The resource model definition representing SKU
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetSharedCommitmentPlanResult(GetSharedCommitmentPlanResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `yield` makes __await__ a generator function so the result
        # can be awaited; it never suspends and resolves immediately with a
        # plain GetSharedCommitmentPlanResult copy of this object's fields.
        if False:
            yield self
        return GetSharedCommitmentPlanResult(
            etag=self.etag,
            id=self.id,
            kind=self.kind,
            location=self.location,
            name=self.name,
            properties=self.properties,
            sku=self.sku,
            system_data=self.system_data,
            tags=self.tags,
            METHOD_NAME=self.METHOD_NAME)
def get_shared_commitment_plan(commitment_plan_name: Optional[str] = None,
                               resource_group_name: Optional[str] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSharedCommitmentPlanResult:
    """
    Returns a Cognitive Services commitment plan specified by the parameters.


    :param str commitment_plan_name: The name of the commitmentPlan associated with the Cognitive Services Account
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    __args__ = {
        'commitmentPlanName': commitment_plan_name,
        'resourceGroupName': resource_group_name,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    invoke_result = pulumi.runtime.invoke(
        'azure-native:cognitiveservices/v20230501:getSharedCommitmentPlan',
        __args__,
        opts=opts,
        typ=GetSharedCommitmentPlanResult,
    ).value

    return AwaitableGetSharedCommitmentPlanResult(
        etag=pulumi.get(invoke_result, 'etag'),
        id=pulumi.get(invoke_result, 'id'),
        kind=pulumi.get(invoke_result, 'kind'),
        location=pulumi.get(invoke_result, 'location'),
        name=pulumi.get(invoke_result, 'name'),
        properties=pulumi.get(invoke_result, 'properties'),
        sku=pulumi.get(invoke_result, 'sku'),
        system_data=pulumi.get(invoke_result, 'system_data'),
        tags=pulumi.get(invoke_result, 'tags'),
        METHOD_NAME=pulumi.get(invoke_result, 'type'))
@_utilities.lift_output_func(get_shared_commitment_plan)
def get_shared_commitment_plan_output(commitment_plan_name: Optional[pulumi.Input[str]] = None,
                                      resource_group_name: Optional[pulumi.Input[str]] = None,
                                      opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSharedCommitmentPlanResult]:
    """
    Returns a Cognitive Services commitment plan specified by the parameters.


    :param str commitment_plan_name: The name of the commitmentPlan associated with the Cognitive Services Account
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Body intentionally empty: lift_output_func wraps the plain function
    # above so this variant accepts pulumi Inputs and returns an Output.
    ...
298,531 | target work | from __future__ import absolute_import, division, print_function
import iotbx.pdb
import mmtbx.f_model
from scitbx.array_family import flex
from libtbx import group_args
from cctbx import xray
import scitbx.lbfgs
from libtbx import adopt_init_args
import cctbx
from six.moves import range
pdb_str = """
CRYST1 12.000 11.000 13.000 80.00 70.00 100.00 P 1
SCALE1 0.083333 0.014694 -0.035164 0.00000
SCALE2 0.000000 0.092312 -0.024020 0.00000
SCALE3 0.000000 0.000000 0.084586 0.00000
ATOM 1 CB PHE A 1 7.353 5.743 7.446 1.00 11.07 C
ANISOU 1 CB PHE A 1 1417 1711 1077 -802 -534 562 C
ATOM 2 CG PHE A 1 6.587 5.028 8.521 1.00 12.41 C
ATOM 3 CD1 PHE A 1 5.463 4.281 8.210 1.00 15.10 C
ANISOU 3 CD1 PHE A 1 2242 1692 1805 -865 -520 173 C
ATOM 4 CD2 PHE A 1 6.993 5.104 9.843 1.00 11.77 C
ATOM 5 CE1 PHE A 1 4.758 3.623 9.198 1.00 15.96 C
ATOM 6 CE2 PHE A 1 6.292 4.449 10.836 1.00 12.44 C
ANISOU 6 CE2 PHE A 1 1794 1178 1756 -466 -772 83 C
ATOM 7 CZ PHE A 1 5.173 3.707 10.513 1.00 14.49 C
ANISOU 7 CZ PHE A 1 2230 1388 1889 -462 -737 32 C
ATOM 8 C PHE A 1 7.886 7.946 6.389 1.00 15.51 C
ANISOU 8 C PHE A 1 1740 2635 1517 -904 -600 967 C
ATOM 9 O PHE A 1 8.151 7.695 5.214 1.00 16.93 O
ANISOU 9 O PHE A 1 1943 2817 1671 -1003 -687 1048 O
ATOM 10 OXT PHE A 1 8.501 8.858 6.941 1.00 19.45 O
ATOM 11 N PHE A 1 5.580 7.078 6.395 1.00 13.11 N
ANISOU 11 N PHE A 1 1400 1945 1635 -826 -589 838 N
ATOM 12 CA PHE A 1 6.829 7.148 7.143 1.00 12.44 C
TER
END
"""
class minimizer(object):
    """LBFGS driver that refines ADPs (isotropic and anisotropic U) against
    the fmodel target. Acts as its own target evaluator via
    compute_functional_and_gradients()."""

    def __init__(self,
                 fmodel,
                 max_iterations=25):
        self.fmodel = fmodel
        # Clear all gradient flags, then enable gradients only for the ADP
        # parameters (aniso U and iso U scatterer selections).
        self.fmodel.xray_structure.scatterers().flags_set_grads(state=False)
        self.x_target_functor = self.fmodel.target_functor()
        self.fmodel.xray_structure.scatterers().flags_set_grad_u_aniso(
            iselection = self.fmodel.xray_structure.use_u_aniso().iselection())
        self.fmodel.xray_structure.scatterers().flags_set_grad_u_iso(
            iselection = self.fmodel.xray_structure.use_u_iso().iselection())
        # Shift vector over all refinable parameters, starting at zero;
        # shifts are always applied relative to the starting scatterers.
        self.x = flex.double(self.fmodel.xray_structure.n_parameters(), 0)
        self._scatterers_start = self.fmodel.xray_structure.scatterers()
        self.call_count = 0
        # Run the minimization (callbacks fire during this constructor call).
        self.minimizer = scitbx.lbfgs.run(
            target_evaluator=self,
            termination_params=scitbx.lbfgs.termination_parameters(
                max_iterations=max_iterations),
            exception_handling_params=scitbx.lbfgs.exception_handling_parameters(
                ignore_line_search_failed_rounding_errors=True,
                ignore_line_search_failed_step_at_lower_bound=True,
                ignore_line_search_failed_maxfev=True))
        # Clean up the refined ADPs and apply final shifts before the final
        # F_calc update.
        self.fmodel.xray_structure.tidy_us()
        self.apply_shifts()
        del self._scatterers_start
        self.fmodel.update_xray_structure(
            xray_structure = self.fmodel.xray_structure,
            update_f_calc = True)

    def apply_shifts(self):
        # Apply the current shift vector self.x to the starting scatterers and
        # refresh F_calc accordingly.
        apply_shifts_result = xray.ext.minimization_apply_shifts(
            unit_cell = self.fmodel.xray_structure.unit_cell(),
            scatterers = self._scatterers_start,
            shifts = self.x)
        scatterers_shifted = apply_shifts_result.shifted_scatterers
        self.fmodel.xray_structure.replace_scatterers(
            scatterers = scatterers_shifted)
        self.fmodel.update_xray_structure(
            xray_structure = self.fmodel.xray_structure,
            update_f_calc = True)

    def compute_functional_and_gradients(self):
        # LBFGS callback: return (target value, packed gradient vector) at the
        # model defined by the current shifts.
        self.apply_shifts()
        tgx = func(fmodel=self.fmodel)
        f = tgx.METHOD_NAME()
        g = tgx.grads_u_anisos()
        # Re-map raw gradients onto the refinable-parameter layout.
        xray.minimization.add_gradients(
            scatterers = self.fmodel.xray_structure.scatterers(),
            xray_gradients = g)
        self.call_count += 1
        return f, g
class func(object):
    """Unweighted least-squares target and its gradients with respect to
    anisotropic ADP parameters, evaluated for a given fmodel."""

    def __init__(self, fmodel):
        adopt_init_args(self, locals())
        # Unit weights: plain (unweighted) least-squares residual.
        weights = flex.double(self.fmodel.f_obs().data().size(), 1.0)
        self.core = xray.target_functors.least_squares(
            compute_scale_using_all_data = False,
            f_obs = self.fmodel.f_obs(),
            r_free_flags = self.fmodel.r_free_flags(),
            weights = weights,
            scale_factor = 1)
        self.r = self.core(f_calc = self.fmodel.f_model(), compute_gradients=True)
        self.d_target_d_f_calc = self.r.gradients_work() # XXX needs scales
        self.ge = cctbx.xray.structure_factors.gradients(
            miller_set = self.fmodel.f_obs())

    def METHOD_NAME(self):
        # Target value over the work reflections.
        return self.r.METHOD_NAME()

    def grads_u_anisos(self):
        # Packed gradients of the target w.r.t. the refinable ADP parameters,
        # using direct-summation structure-factor gradients.
        return self.ge(
            u_iso_refinable_params = None,
            d_target_d_f_calc = self.d_target_d_f_calc,
            xray_structure = self.fmodel.xray_structure,
            n_parameters = self.fmodel.xray_structure.n_parameters(),
            miller_set = self.fmodel.f_obs_work(),
            algorithm = "direct").packed()
def get_inputs(pdb_str):
    """Parse a PDB string into a hierarchy plus the matching xray structure."""
    pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_str)
    hierarchy = pdb_inp.construct_hierarchy()
    xray_structure = hierarchy.extract_xray_structure(
        crystal_symmetry=pdb_inp.crystal_symmetry())
    return group_args(pdb_hierarchy=hierarchy, xray_structure=xray_structure)
def run():
    """End-to-end ADP refinement demo: perturb the model's ADPs, simulate
    error-free Fobs from the unperturbed model, then refine the ADPs back.
    Writes start.pdb, poor.pdb and refined.pdb to the working directory."""
    # get xray_structure from PDB file
    inp = get_inputs(pdb_str = pdb_str)
    if(1):
        inp.pdb_hierarchy.adopt_xray_structure(inp.xray_structure)
        inp.pdb_hierarchy.write_pdb_file(file_name="start.pdb")
    # simulate poor starting model
    xrs_poor = inp.xray_structure.deep_copy_scatterers()
    xrs_poor.shake_adp(aniso_spread=1.5, random_u_cart_scale=10.0)
    if(1):
        inp.pdb_hierarchy.adopt_xray_structure(xrs_poor)
        inp.pdb_hierarchy.write_pdb_file(file_name="poor.pdb")
    # simulate Fobs (noise-free amplitudes from the unperturbed model)
    f_obs = abs(inp.xray_structure.structure_factors(
        d_min = 1.0,
        algorithm="direct").f_calc())
    r_free_flags = f_obs.generate_r_free_flags()
    # get fmodel (unweighted least-squares target, direct summation)
    params = mmtbx.f_model.sf_and_grads_accuracy_master_params.extract()
    params.algorithm = "direct"
    fmodel = mmtbx.f_model.manager(
        f_obs = f_obs,
        r_free_flags = r_free_flags,
        xray_structure = xrs_poor,
        sf_and_grads_accuracy_params = params,
        target_name = "ls_wunit_kunit")
    # refinement loop: R-work should approach zero as ADPs are recovered
    print("start r_factor: %6.4f" % fmodel.r_work())
    total_call_count = 0
    for macro_cycle in range(10):
        minimized = minimizer(fmodel = fmodel)
        total_call_count += minimized.call_count
        print(" macro_cycle %3d (adp) r_factor: %6.4f call_count=%d" % \
            (macro_cycle, fmodel.r_work(), minimized.call_count))
    print('total_call_count =', total_call_count)
    if(1):
        inp.pdb_hierarchy.adopt_xray_structure(fmodel.xray_structure)
        inp.pdb_hierarchy.write_pdb_file(file_name="refined.pdb")
if __name__ == "__main__":
    run()
298,532 | get captcha answer | from __future__ import absolute_import
import re
import requests
try:
import polling
except ImportError:
raise ImportError(
"Please install the python module 'polling' via pip or download it from "
"https://github.com/justiniso/polling/"
)
from ..exceptions import (
reCaptchaServiceUnavailable,
reCaptchaAPIError,
reCaptchaTimeout,
reCaptchaParameter,
reCaptchaBadJobID
)
from . import reCaptcha
class captchaSolver(reCaptcha):
    """reCaptcha/hCaptcha solver backed by the 9kw.eu captcha service."""

    def __init__(self):
        super(captchaSolver, self).__init__('9kw')
        self.host = 'https://www.9kw.eu/index.cgi'
        self.maxtimeout = 180  # seconds the service may take to solve a captcha
        self.session = requests.Session()

    # ------------------------------------------------------------------------------- #

    @staticmethod
    def checkErrorStatus(response):
        """Raise a descriptive exception when `response` reports a 9kw error.

        9kw signals errors either as JSON ({"error": <code>}) or as a plain
        text body starting with "00<code>". Returns None when no error found.

        :raises reCaptchaServiceUnavailable: on HTTP 500/502.
        :raises reCaptchaAPIError: on any documented 9kw error code.
        """
        if response.status_code in [500, 502]:
            raise reCaptchaServiceUnavailable(
                f'9kw: Server Side Error {response.status_code}'
            )

        error_codes = {
            1: 'No API Key available.',
            2: 'No API key found.',
            3: 'No active API key found.',
            4: 'API Key has been disabled by the operator. ',
            5: 'No user found.',
            6: 'No data found.',
            7: 'Found No ID.',
            8: 'found No captcha.',
            9: 'No image found.',
            10: 'Image size not allowed.',
            11: 'credit is not sufficient.',
            12: 'what was done.',
            13: 'No answer contain.',
            14: 'Captcha already been answered.',
            15: 'Captcha to quickly filed.',
            16: 'JD check active.',
            17: 'Unknown problem.',
            18: 'Found No ID.',
            19: 'Incorrect answer.',
            20: 'Do not timely filed (Incorrect UserID).',
            21: 'Link not allowed.',
            22: 'Prohibited submit.',
            23: 'Entering prohibited.',
            24: 'Too little credit.',
            25: 'No entry found.',
            26: 'No Conditions accepted.',
            27: 'No coupon code found in the database.',
            28: 'Already unused voucher code.',
            29: 'maxTimeout under 60 seconds.',
            30: 'User not found.',
            31: 'An account is not yet 24 hours in system.',
            32: 'An account does not have the full rights.',
            33: 'Plugin needed a update.',
            34: 'No HTTPS allowed.',
            35: 'No HTTP allowed.',
            36: 'Source not allowed.',
            37: 'Transfer denied.',
            38: 'Incorrect answer without space',
            39: 'Incorrect answer with space',
            40: 'Incorrect answer with not only numbers',
            41: 'Incorrect answer with not only A-Z, a-z',
            42: 'Incorrect answer with not only 0-9, A-Z, a-z',
            43: 'Incorrect answer with not only [0-9,- ]',
            44: 'Incorrect answer with not only [0-9A-Za-z,- ]',
            45: 'Incorrect answer with not only coordinates',
            46: 'Incorrect answer with not only multiple coordinates',
            47: 'Incorrect answer with not only data',
            48: 'Incorrect answer with not only rotate number',
            49: 'Incorrect answer with not only text',
            50: 'Incorrect answer with not only text and too short',
            51: 'Incorrect answer with not enough chars',
            52: 'Incorrect answer with too many chars',
            53: 'Incorrect answer without no or yes',
            54: 'Assignment was not found.'
        }

        if response.text.startswith('{'):
            payload = response.json()
            if payload.get('error'):
                raise reCaptchaAPIError(error_codes.get(int(payload.get('error'))))
        else:
            # Bug fix: re.search() returns None when the body does not match
            # the "00<code>" pattern; calling .groupdict() on it crashed with
            # AttributeError. Only inspect the match when one exists.
            match = re.search(r'^00(?P<error_code>\d+)', response.text)
            if match:
                error_code = int(match.groupdict().get('error_code', 0))
                if error_code:
                    raise reCaptchaAPIError(error_codes.get(error_code))

    # ------------------------------------------------------------------------------- #

    def requestJob(self, jobID):
        """Poll 9kw for the answer to a previously submitted captcha.

        :param jobID: captcha id returned by requestSolve().
        :return: the captcha answer string.
        :raises reCaptchaBadJobID: when jobID is falsy.
        :raises reCaptchaTimeout: when no answer arrives within maxtimeout.
        """
        if not jobID:
            raise reCaptchaBadJobID(
                "9kw: Error bad job id to request reCaptcha against."
            )

        def _checkRequest(response):
            # Accept only a response that actually carries an answer.
            if response.ok and response.json().get('answer') != 'NO DATA':
                return response

            self.checkErrorStatus(response)
            return None

        response = polling.poll(
            lambda: self.session.get(
                self.host,
                params={
                    'apikey': self.api_key,
                    'action': 'usercaptchacorrectdata',
                    'id': jobID,
                    'info': 1,
                    'json': 1
                }
            ),
            check_success=_checkRequest,
            step=10,
            timeout=(self.maxtimeout + 10)
        )

        if response:
            return response.json().get('answer')
        else:
            raise reCaptchaTimeout("9kw: Error failed to solve reCaptcha.")

    # ------------------------------------------------------------------------------- #

    def requestSolve(self, captchaType, url, siteKey):
        """Submit a captcha to 9kw and poll until a captchaid is issued.

        :param captchaType: 'reCaptcha' or 'hCaptcha'.
        :param url: page URL the captcha appears on.
        :param siteKey: the site key of the captcha widget.
        :return: the 9kw captchaid to poll with requestJob().
        :raises reCaptchaBadJobID: when no job id was returned in time.
        """
        def _checkRequest(response):
            if response.ok and response.text.startswith('{') and response.json().get('captchaid'):
                return response

            self.checkErrorStatus(response)
            return None

        captchaMap = {
            'reCaptcha': 'recaptchav2',
            'hCaptcha': 'hcaptcha'
        }

        response = polling.poll(
            lambda: self.session.post(
                self.host,
                data={
                    'apikey': self.api_key,
                    'action': 'usercaptchaupload',
                    'interactive': 1,
                    'file-upload-01': siteKey,
                    'oldsource': captchaMap[captchaType],
                    'pageurl': url,
                    'maxtimeout': self.maxtimeout,
                    'json': 1
                },
                allow_redirects=False
            ),
            check_success=_checkRequest,
            step=5,
            timeout=(self.maxtimeout + 10)
        )

        if response:
            return response.json().get('captchaid')
        else:
            raise reCaptchaBadJobID('9kw: Error no valid job id was returned.')

    # ------------------------------------------------------------------------------- #

    def METHOD_NAME(self, captchaType, url, siteKey, reCaptchaParams):
        """Entry point: configure from params, submit the captcha, return answer.

        :raises reCaptchaParameter: when api_key is missing.
        :raises reCaptchaTimeout: when solving exceeds the polling timeout.
        """
        jobID = None

        if not reCaptchaParams.get('api_key'):
            raise reCaptchaParameter("9kw: Missing api_key parameter.")

        self.api_key = reCaptchaParams.get('api_key')

        if reCaptchaParams.get('maxtimeout'):
            self.maxtimeout = reCaptchaParams.get('maxtimeout')

        if reCaptchaParams.get('proxy'):
            # Bug fix: previously read the non-existent 'proxies' key, which
            # set session.proxies to None whenever 'proxy' was supplied.
            self.session.proxies = reCaptchaParams.get('proxy')

        try:
            jobID = self.requestSolve(captchaType, url, siteKey)
            return self.requestJob(jobID)
        except polling.TimeoutException:
            raise reCaptchaTimeout(
                f"9kw: reCaptcha solve took to long to execute 'captchaid' {jobID}, aborting."
            )
# ------------------------------------------------------------------------------- #

# Instantiating the solver registers it with the reCaptcha dispatcher.
captchaSolver()
#!/usr/bin/env python3
import sys
import pickle
import struct
import pprint
import zipfile
import fnmatch
from typing import Any, IO, BinaryIO, Union
__all__ = ["FakeObject", "FakeClass", "DumpUnpickler", "main"]
class FakeObject:
    """Placeholder for an unpickled object whose real class was never imported."""

    def __init__(self, module, name, args):
        self.module = module
        self.name = name
        self.args = args
        # NOTE: We don't distinguish between state never set and state set to None.
        self.state = None

    def __repr__(self):
        suffix = f"(state={self.state!r})" if self.state is not None else ""
        return f"{self.module}.{self.name}{self.args!r}{suffix}"

    def __setstate__(self, state):
        # Capture whatever state the pickle carries instead of applying it.
        self.state = state

    @staticmethod
    def pp_format(printer, obj, stream, indent, allowance, context, level):
        """pprint dispatch hook: render obj, recursing into its args/state."""
        has_args = bool(obj.args)
        has_state = obj.state is not None
        if not has_args and not has_state:
            stream.write(repr(obj))
        elif not has_state:
            stream.write(f"{obj.module}.{obj.name}")
            printer._format(obj.args, stream, indent + 1, allowance + 1, context, level)
        elif not has_args:
            stream.write(f"{obj.module}.{obj.name}()(state=\n")
            pad = indent + printer._indent_per_level
            stream.write(" " * pad)
            printer._format(obj.state, stream, pad, allowance + 1, context, level + 1)
            stream.write(")")
        else:
            raise Exception("Need to implement")
class FakeClass:
    """Stand-in for a class referenced by a pickle; produces FakeObject instances."""

    def __init__(self, module, name):
        self.module = module
        self.name = name
        # Route pickle's __new__-style construction through METHOD_NAME as well.
        self.__new__ = self.METHOD_NAME  # type: ignore[assignment]

    def __repr__(self):
        return f"{self.module}.{self.name}"

    def __call__(self, *args):
        # REDUCE-style construction: record the call as a FakeObject.
        return FakeObject(self.module, self.name, args)

    def METHOD_NAME(self, *args):
        # __new__-style construction: drop the leading class argument.
        return FakeObject(self.module, self.name, args[1:])
class DumpUnpickler(pickle._Unpickler):  # type: ignore[name-defined]
    """Unpickler that materializes every class/object as a Fake* stand-in.

    Lets arbitrary pickles (e.g. from TorchScript archives) be inspected
    without importing — or trusting — the code that produced them.
    """

    def __init__(
            self,
            file,
            *,
            catch_invalid_utf8=False,
            **kwargs):
        super().__init__(file, **kwargs)
        # When True, undecodable BINUNICODE payloads become sentinel objects
        # instead of raising UnicodeDecodeError.
        self.catch_invalid_utf8 = catch_invalid_utf8

    def find_class(self, module, name):
        # Never import real code; hand back a fake stand-in instead.
        return FakeClass(module, name)

    def persistent_load(self, pid):
        return FakeObject("pers", "obj", (pid,))

    # Class-level copy so our dispatch edits don't mutate pickle's own table.
    dispatch = dict(pickle._Unpickler.dispatch)  # type: ignore[attr-defined]

    # Custom objects in TorchScript are able to return invalid UTF-8 strings
    # from their pickle (__getstate__) functions. Install a custom loader
    # for strings that catches the decode exception and replaces it with
    # a sentinel object.
    def load_binunicode(self):
        # BINUNICODE layout: 4-byte little-endian length, then that many bytes.
        strlen, = struct.unpack("<I", self.read(4))  # type: ignore[attr-defined]
        if strlen > sys.maxsize:
            raise Exception("String too long.")
        str_bytes = self.read(strlen)  # type: ignore[attr-defined]
        obj: Any
        try:
            obj = str(str_bytes, "utf-8", "surrogatepass")
        except UnicodeDecodeError as exn:
            if not self.catch_invalid_utf8:
                raise
            obj = FakeObject("builtin", "UnicodeDecodeError", (str(exn),))
        self.append(obj)  # type: ignore[attr-defined]
    dispatch[pickle.BINUNICODE[0]] = load_binunicode  # type: ignore[assignment]

    @classmethod
    def dump(cls, in_stream, out_stream):
        """Unpickle in_stream, pretty-print the result to out_stream, return it."""
        value = cls(in_stream).load()
        pprint.pprint(value, stream=out_stream)
        return value
def main(argv, output_stream=None):
    """Command-line entry point: pretty-print the pickle named by argv[1].

    argv[1] may be a plain pickle path or "archive.zip@member", where member
    may contain shell-style wildcards (only the first match is shown).
    Returns a process exit status (2 on usage error).
    """
    if len(argv) != 2:
        # Don't spam stderr if not using stdout.
        if output_stream is not None:
            raise Exception("Pass argv of length 2.")
        sys.stderr.write("usage: show_pickle PICKLE_FILE\n")
        sys.stderr.write(" PICKLE_FILE can be any of:\n")
        sys.stderr.write(" path to a pickle file\n")
        sys.stderr.write(" file.zip@member.pkl\n")
        sys.stderr.write(" file.zip@*/pattern.*\n")
        sys.stderr.write(" (shell glob pattern for members)\n")
        sys.stderr.write(" (only first match will be shown)\n")
        return 2

    fname = argv[1]
    handle: Union[IO[bytes], BinaryIO]
    if "@" not in fname:
        # Plain pickle file on disk.
        with open(fname, "rb") as handle:
            DumpUnpickler.dump(handle, output_stream)
    else:
        # Member of a zip archive, addressed as "archive.zip@member".
        zfname, mname = fname.split("@", 1)
        with zipfile.ZipFile(zfname) as zf:
            if "*" not in mname:
                with zf.open(mname) as handle:
                    DumpUnpickler.dump(handle, output_stream)
            else:
                # Glob over archive members; stop at the first match.
                found = False
                for info in zf.infolist():
                    if fnmatch.fnmatch(info.filename, mname):
                        with zf.open(info) as handle:
                            DumpUnpickler.dump(handle, output_stream)
                        found = True
                        break
                if not found:
                    raise Exception(f"Could not find member matching {mname} in {zfname}")
if __name__ == "__main__":
    # This hack works on every version of Python I've tested.
    # I've tested on the following versions:
    #   3.7.4
    if True:
        # Teach pprint how to render FakeObject (dispatch is keyed on __repr__).
        pprint.PrettyPrinter._dispatch[FakeObject.__repr__] = FakeObject.pp_format  # type: ignore[attr-defined]

    sys.exit(main(sys.argv))
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def METHOD_NAME(**kwargs: Any) -> HttpRequest:
    """Build the GET request for listing Microsoft.ContainerService operations."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # kwargs override takes precedence over an api-version already in params.
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2023-02-01"))
    accept = headers.pop("Accept", "application/json")

    # URL, query parameters and headers.
    url = kwargs.pop("template_url", "/providers/Microsoft.ContainerService/operations")
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.containerservice.v2023_02_01.ContainerServiceClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Wiring injected positionally by the generated client; keyword fallback
        # keeps direct construction working.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")

    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.OperationValue"]:
        """Gets a list of operations.

        Gets a list of operations.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationValue or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2023_02_01.models.OperationValue]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-02-01"))
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)

        # Map auth/CRUD status codes to typed exceptions; callers may extend.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the operation template; later pages follow next_link.
            if not next_link:
                request = METHOD_NAME(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page; returns (continuation token, page items).
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page and fail fast on any non-200 status.
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list.metadata = {"url": "/providers/Microsoft.ContainerService/operations"}
from functools import wraps
class _PPP_CB:
'''
Internal class to keep track of a the functions to run when a PPP-style
callback is triggered
'''
def __init__(self):
self.callbacks = []
def run(self, *args):
for targ_func in self.callbacks:
targ_func(*args)
def add_callback(self, fn):
assert(fn not in self.callbacks), "Duplicate callback"
self.callbacks.append(fn)
class PyPlugin:
    def __init__(self, panda):
        '''
        Base class which PyPANDA plugins should inherit. Subclasses may
        register callbacks using the provided panda object and use the
        PyPlugin APIs:

            * self.get_args or self.get_arg_bool to check argument values
            * self.ppp to interact with other PyPlugins via PPP interfaces
            * self.ppp_cb_boilerplate('cb_name') to register a ppp-style callback
            * self.ppp_run_cb('cb_name') to run a previously-registered ppp-style callback
            * @PyPlugin.ppp_export to mark a class method as ppp-exported

        For more information, check out the pyplugin documentation.
        '''

    # Parent init method which will be called prior to child __init__
    def __preinit__(self, pypluginmgr, args):
        # ppp callback name => _PPP_CB instance which tracks registered cbs and runs them
        self.ppp_cbs = {}
        self.args = args
        self.pypluginmgr = pypluginmgr

    @property
    def METHOD_NAME(self):
        # Why is this a property you ask? Because it makes it easier to set a docstring
        '''
        The .ppp property of the PyPlugin class is used for accessing PPP methods and callbacks
        exposed by other PyPlugins. (Under the hood, this is a reference to the PyPluginManager.ppp
        property).

        Through self.ppp, you can reference another PyPlugin by name, e.g., if a previously-loaded plugin
        is named `Server`, from your plugin you can do `self.ppp.Server` to access PPP-exported methods.

        From there, you can run PPP-exported functions by name: `self.ppp.Server.some_exported_fn(*args)`.
        Or you can register a local class method a PPP-style callback provided by the other plugin:
        `self.ppp.server.ppp_reg_cb('some_provided_callback', self.some_local_method)`
        '''
        return self.pypluginmgr.METHOD_NAME

    @staticmethod
    def ppp_export(method):
        '''
        Decorator to apply to a class method in a PyPlugin to indicate that other plugins should
        be allowed to call this function. Example:

            from pandare import PyPlugin
            Class Server(PyPlugin):
                def __init__(self, panda):
                    pass

                @PyPlugin.ppp_export
                def do_add(self, x):
                    return x+1

            Class Client(PyPlugin):
                def __init__(self, panda):
                    print(self.ppp.Server.do_add(1))
        '''
        @wraps(method)
        def f(*args, **kwargs):
            return method(*args, **kwargs)
        # Markers read by the plugin manager to find exported methods.
        f.__is_pyplugin_ppp = True
        f.__original_method = method
        return f

    # Argument loading
    def get_arg(self, arg_name):
        '''
        Returns either the argument as a string or None if the argument
        wasn't passed (arguments passed in bool form (i.e., set but with no value)
        instead of key/value form will also return None).
        '''
        if arg_name in self.args:
            return self.args[arg_name]
        return None

    def get_arg_bool(self, arg_name):
        '''
        Returns True if the argument is set and has a truthy value
        '''
        if arg_name not in self.args:
            # Argument name unset - it's false
            return False

        arg_val = self.args[arg_name]
        if isinstance(arg_val, bool):
            # If it's a python bool already, just return it
            return arg_val

        if isinstance(arg_val, str):
            # string of true/y/1 is True
            return arg_val.lower() in ['true', 'y', '1']

        if isinstance(arg_val, int):
            # Nonzero is True
            return arg_val != 0

        # If it's not a string, int, or bool something is weird
        raise ValueError(f"Unsupported arg type: {type(arg_val)}")

    # Callback definition / registration / use. Note these functions mirror the behavior of the macros used
    # in C plugin, check out docs/readme.md for additional details.
    def ppp_cb_boilerplate(self, cb_name):
        '''
        "Define" a PPP-style function in this plugin. Note that there is no type
        information because this is Python. Run via .ppp[cb_name].run(...)
        '''
        plugin_name = self.__class__.__name__
        # NOTE(review): cb_name is checked against self.ppp_cbs here but never
        # inserted into it, so this redefinition guard can never fire — confirm intent.
        if cb_name in self.ppp_cbs:
            raise ValueError(f"PPP function {cb_name} is being redefined in {plugin_name}")

        # Add two callbacks into our PPP namespace: fn_add and fn_run
        this_ppp_cb = _PPP_CB()
        self.METHOD_NAME.add(self.__class__.__name__, "ppp_reg_cb_" + cb_name, this_ppp_cb.add_callback)
        self.METHOD_NAME.add(self.__class__.__name__, "ppp_run_cb_" + cb_name, this_ppp_cb.run)

        # Make sure we have a helper self.ppp[class].ppp_reg_cb which just calls
        # the ppp_reg_[cb_name] we just saved
        try:
            getattr(getattr(self.METHOD_NAME, self.__class__.__name__), "ppp_reg_cb")
        except AttributeError:
            def _reg_cb(target_ppp, func):
                getattr(getattr(self.METHOD_NAME,
                        self.__class__.__name__), "ppp_reg_cb_" + target_ppp)(func)
            self.METHOD_NAME.add(self.__class__.__name__, "ppp_reg_cb", _reg_cb)

    def ppp_run_cb(self, target_ppp, *args):
        '''
        Trigger a previously defined PPP-style callback named `target_ppp` in this plugin with `args`
        Any other pyplugins which have registered a function to run on this callback will be called with `args`.
        '''
        getattr(getattr(self.METHOD_NAME, self.__class__.__name__), "ppp_run_cb_" + target_ppp)(*args)
"""
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`~sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`~sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
# ----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
# Bimodal 1D sample: 30% drawn near 0, 70% near 5, shaped as a column vector.
X = np.concatenate(
    (np.random.normal(0, 1, int(0.3 * N)), np.random.normal(5, 1, int(0.7 * N)))
)[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)

fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)

# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc="#AAAAFF", density=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")

# histogram 2: same data, bins shifted — illustrates gridding sensitivity
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc="#AAAAFF", density=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")

# tophat KDE
kde = KernelDensity(kernel="tophat", bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc="#AAAAFF")
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")

# Gaussian KDE
kde = KernelDensity(kernel="gaussian", bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc="#AAAAFF")
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")

# Mark the sample points as ticks along the bottom of every panel.
for axi in ax.ravel():
    axi.plot(X[:, 0], np.full(X.shape[0], -0.01), "+k")
    axi.set_xlim(-4, 9)
    axi.set_ylim(-0.02, 0.34)

for axi in ax[:, 0]:
    axi.set_ylabel("Normalized Density")

for axi in ax[1, :]:
    axi.set_xlabel("x")

# ----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
# A single source point at the origin, so each panel shows the kernel shape.
X_src = np.zeros((1, 1))

fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def METHOD_NAME(x, loc):
    """Matplotlib tick formatter: label ticks as multiples of the bandwidth h."""
    # `loc` is required by the FuncFormatter signature but unused here.
    named = {0: "0", 1: "h", -1: "-h"}
    if x in named:
        return named[x]
    return "%ih" % x
# Render each kernel's density estimate of the single origin point.
for i, kernel in enumerate(
    ["gaussian", "tophat", "epanechnikov", "exponential", "linear", "cosine"]
):
    axi = ax.ravel()[i]
    log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
    axi.fill(X_plot[:, 0], np.exp(log_dens), "-k", fc="#AAAAFF")
    axi.text(-2.6, 0.95, kernel)

    # x ticks are labeled in units of the bandwidth h via the formatter above.
    axi.xaxis.set_major_formatter(plt.FuncFormatter(METHOD_NAME))
    axi.xaxis.set_major_locator(plt.MultipleLocator(1))
    axi.yaxis.set_major_locator(plt.NullLocator())

    axi.set_ylim(0, 1.05)
    axi.set_xlim(-2.9, 2.9)

ax[0, 1].set_title("Available Kernels")
# ----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate(
    (np.random.normal(0, 1, int(0.3 * N)), np.random.normal(5, 1, int(0.7 * N)))
)[:, np.newaxis]

X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]

# Analytic density of the generating mixture, for reference.
true_dens = 0.3 * norm(0, 1).pdf(X_plot[:, 0]) + 0.7 * norm(5, 1).pdf(X_plot[:, 0])

fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc="black", alpha=0.2, label="input distribution")
colors = ["navy", "cornflowerblue", "darkorange"]
kernels = ["gaussian", "tophat", "epanechnikov"]
lw = 2

# Overlay one KDE curve per kernel, all with the same bandwidth.
for color, kernel in zip(colors, kernels):
    kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
    log_dens = kde.score_samples(X_plot)
    ax.plot(
        X_plot[:, 0],
        np.exp(log_dens),
        color=color,
        lw=lw,
        linestyle="-",
        label="kernel = '{0}'".format(kernel),
    )

ax.text(6, 0.38, "N={0} points".format(N))

ax.legend(loc="upper left")
# Jittered rug plot of the sample below the axis.
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), "+k")

ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
"""
Copyright 2022 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
"""
import json
from typing import Dict, List, Optional, Set
from inmanta import resources
from inmanta.data.model import AttributeDiff, ResourceDiff, ResourceDiffStatus, ResourceIdStr
class Attribute:
def __init__(self, name: str, value: object) -> None:
self._name = name
self._value = value
if self._name == "requires":
# Sort the requires list
self._value = sorted([resources.Id.parse_id(req).resource_str() for req in self._value])
self._compare_value: Optional[str] = None
@property
def value(self) -> object:
"""The value of the attribute"""
return self._value
@property
def compare_value(self) -> str:
"""The string representation of the value, which can be used for comparison"""
self.METHOD_NAME()
return self._compare_value
def METHOD_NAME(self) -> None:
"""Generate a value that can be used for comparison"""
if self._compare_value is not None:
return
if self.value is None:
self._compare_value = ""
elif isinstance(self.value, (dict, list)):
self._compare_value = json.dumps(self.value, indent=4, sort_keys=True)
else:
self._compare_value = str(self.value)
def compare(self, other: "Attribute") -> Optional[AttributeDiff]:
"""Compare this value with other. Other is considered the original value"""
if self.compare_value == other.compare_value:
return None
diff = AttributeDiff(
from_value=other.value,
to_value=self.value,
from_value_compare=other.compare_value,
to_value_compare=self.compare_value,
)
return diff
def added(self) -> AttributeDiff:
"""Return an attribute diff as if this attribute is newly added"""
return AttributeDiff(
from_value=None,
to_value=self.value,
from_value_compare="",
to_value_compare=self.compare_value,
)
def removed(self) -> AttributeDiff:
"""Return an attribute diff as if this attribute is removed in a later version"""
return AttributeDiff(
from_value=self.value,
to_value=None,
from_value_compare=self.compare_value,
to_value_compare="",
)
class Resource:
    """A resource (id plus attributes) that can be diffed against another version."""

    def __init__(self, resource_id: ResourceIdStr, attributes: Dict[str, object]) -> None:
        self.resource_id = resource_id
        # "version" is model bookkeeping, not configuration: exclude it from diffs.
        self._attributes = {
            key: Attribute(key, val) for key, val in attributes.items() if key != "version"
        }

    def compare(self, other: "Resource") -> Optional[ResourceDiff]:
        """Compare this resource with another: check which attributes are added, modified and removed.
        The other resource is considered to be the original"""
        ours = set(self._attributes)
        theirs = set(other._attributes)

        diff: Dict[str, AttributeDiff] = {}
        for name in ours - theirs:
            diff[name] = self._attributes[name].added()
        for name in theirs - ours:
            diff[name] = other._attributes[name].removed()
        for name in ours & theirs:
            changed = self._attributes[name].compare(other._attributes[name])
            if changed is not None:
                diff[name] = changed

        if not diff:
            return None
        return ResourceDiff(resource_id=self.resource_id, attributes=diff, status=ResourceDiffStatus.modified)

    def added(self) -> ResourceDiff:
        """Return a diff as if this resource is newly added"""
        return ResourceDiff(
            resource_id=self.resource_id,
            attributes={name: attr.added() for name, attr in self._attributes.items()},
            status=ResourceDiffStatus.added,
        )

    def removed(self) -> ResourceDiff:
        """Return a diff as if this resource is removed"""
        return ResourceDiff(
            resource_id=self.resource_id,
            attributes={name: attr.removed() for name, attr in self._attributes.items()},
            status=ResourceDiffStatus.deleted,
        )

    def unmodified(self) -> ResourceDiff:
        """Return a diff as if this resource is not modified"""
        return ResourceDiff(
            resource_id=self.resource_id,
            attributes={},
            status=ResourceDiffStatus.unmodified,
        )
class Version:
    """The set of resources belonging to one configuration model version."""

    def __init__(self, resources: Dict[ResourceIdStr, Resource]) -> None:
        self._resources = resources

    def get_resource_set(self) -> Set[ResourceIdStr]:
        """The names of the resources in this version"""
        return set(self._resources)

    def generate_diff(self, other: "Version", include_unmodified: bool = False) -> List[ResourceDiff]:
        """Compare this version with another: check which resources are added, removed and modified.
        The other version is considered to be the original."""
        ours = self.get_resource_set()
        theirs = other.get_resource_set()

        # Resources present only on one side are wholly added/removed.
        result: List[ResourceDiff] = [self._resources[rid].added() for rid in ours - theirs]
        result += [other._resources[rid].removed() for rid in theirs - ours]

        # Resources present on both sides get a per-attribute diff.
        for rid in ours & theirs:
            changed = self._resources[rid].compare(other._resources[rid])
            if changed:
                result.append(changed)
            elif include_unmodified:
                result.append(self._resources[rid].unmodified())

        result.sort(key=lambda r: r.resource_id)
        return result
def generate_diff(
    from_version_resources: Dict[ResourceIdStr, Resource],
    to_version_resources: Dict[ResourceIdStr, Resource],
    include_unmodified: bool = False,
) -> List[ResourceDiff]:
    """Generate a diff of two sets of resources, describing what has changed between them

    :param from_version_resources: The resources that are considered the starting point for comparison
    :param to_version_resources: The resources that are considered the target for comparison
    :param include_unmodified: If set to true,
        resources that haven't changed between the versions will be included in the results
    """
    target = Version(to_version_resources)
    origin = Version(from_version_resources)
    return target.generate_diff(origin, include_unmodified)
from pysys.basetest import BaseTest
import time
import re
import json
"""
Validate tedge-mapper-collectd messages that are published
on tedge/measurements
Given a configured system
When we start the collectd with sudo in the background
When we start the tedge-mapper-collectd with sudo in the background
When we start tedge sub with sudo in the background
Wait for couple of seconds to publish couple of batch of messages
Then we kill tedge sub with sudo as it is running with a different user account
Then we validate the messages in the output of tedge sub,
"""
class MonitoringWithCollectd(BaseTest):
    def setup(self):
        """Start collectd and tedge-mapper-collectd, and register cleanup."""
        # Per-message JSON payload and per-stat counters, filled by validate_json().
        self.js_msg = ""
        self.cpu_cnt = 0
        self.memory_cnt = 0
        self.time_cnt = 0
        self.disk_cnt = 0
        self.tedge = "/usr/bin/tedge"
        self.sudo = "/usr/bin/sudo"

        collectd = self.startProcess(
            command=self.sudo,
            arguments=["systemctl", "start", "collectd"],
            stdouterr="collectd",
        )

        collectd_mapper = self.startProcess(
            command=self.sudo,
            arguments=["systemctl", "start", "tedge-mapper-collectd"],
            stdouterr="collectd_mapper",
        )

        self.addCleanupFunction(self.monitoring_cleanup)
def METHOD_NAME(self):
time.sleep(0.1)
sub = self.startProcess(
command=self.sudo,
arguments=[self.tedge, "mqtt", "sub", "--no-topic", "tedge/#"],
stdouterr="tedge_sub",
background=True,
)
sub = self.startProcess(
command=self.sudo,
arguments=[self.tedge, "mqtt", "sub", "collectd/#"],
stdouterr="collectd_sub",
background=True,
)
# Wait for a small amount of time to give tedge sub time
# to initialize and capture couple of batches of messages
# that are published by tedge-mapper-collectd.
time.sleep(12)
# Kill the subscriber process explicitly with sudo as PySys does
# not have the rights to do it
kill = self.startProcess(
command=self.sudo,
arguments=["killall", "tedge"],
stdouterr="kill_out",
)
def validate(self):
self.assertGrep("tedge_sub.out", r"time|cpu|memory|df-root")
self.assertThat(
"collectd_msg_validation_result == expected_result",
collectd_msg_validation_result=self.validate_json(),
expected_result=True,
)
def validate_json(self):
f = open(self.output + "/tedge_sub.out", "r")
lines = f.readlines()
for line in lines:
self.js_msg = json.loads(line)
if not self.validate_cpu():
reason = "cpu stat validation failed in message: " + str(line)
self.abort(False, reason)
if not self.validate_time():
reason = "time validation failed in message: " + str(line)
self.abort(False, reason)
if not self.validate_memory():
reason = "memory stat validation failed in message: " + str(line)
self.abort(False, reason)
# validate disk stats if the entries are present, as the disk stats collection window is bigger
if "df-root" in self.js_msg:
if not self.validate_disk():
reason = "disk stat validation failed in message: " + str(line)
self.abort(False, reason)
if (
self.time_cnt == self.cpu_cnt == self.memory_cnt
and self.disk_cnt > 0
and self.disk_cnt <= 3
):
return True
else:
return False
def validate_cpu(self):
if self.js_msg["cpu"]:
if "percent-active" in self.js_msg["cpu"]:
self.cpu_cnt += 1
return True
else:
return False
else:
return False
def validate_time(self):
if self.js_msg["time"]:
self.time_cnt += 1
return True
else:
return False
def validate_memory(self):
if self.js_msg["memory"]:
if "percent-used" in self.js_msg["memory"]:
self.memory_cnt += 1
return True
else:
return False
else:
return False
def validate_disk(self):
if "percent_bytes-used" in self.js_msg["df-root"]:
self.disk_cnt += 1
return True
else:
return False
def monitoring_cleanup(self):
self.log.info("monitoring_cleanup")
collectd = self.startProcess(
command=self.sudo,
arguments=["systemctl", "stop", "tedge-mapper-collectd"],
stdouterr="collectd_mapper",
)
collectd = self.startProcess(
command=self.sudo,
arguments=["systemctl", "stop", "collectd"],
stdouterr="collectd",
) |
298,539 | parent | from PyQt5.QtCore import QAbstractItemModel, QModelIndex, QSize, Qt
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QTreeView, QVBoxLayout, QWidget
from trufont.objects import settings
class KerningDictModel(QAbstractItemModel):
    """Two-level Qt item model over a kerning mapping.

    Top-level rows are the first members of kerning pairs; their children
    are (second member, value) rows. ``public.kern1.``/``public.kern2.``
    group prefixes are abbreviated to a leading "@".
    """

    def __init__(self, mapping=None, METHOD_NAME=None):
        # A literal {} default would be a shared mutable default argument;
        # use a None sentinel instead (backward-compatible for all callers).
        super().__init__(METHOD_NAME)
        self.setupModelData(mapping if mapping is not None else {})

    def setupModelData(self, mapping):
        """Rebuild the internal ``{first: [(second, value), ...]}`` tables."""
        self.layoutAboutToBeChanged.emit()
        self._data = kerns = dict()
        for (kern1, kern2), value in mapping.items():
            kern1 = kern1.replace("public.kern1.", "@")
            kern2 = kern2.replace("public.kern2.", "@")
            # setdefault replaces the manual "append or create" branch.
            kerns.setdefault(kern1, []).append((kern2, value))
        self._keys = list(self._data.keys())
        self.layoutChanged.emit()

    def columnCount(self, METHOD_NAME):
        # Column 0: pair member, column 1: kerning value.
        return 2

    def data(self, index, role):
        if not index.isValid():
            return None
        if role == Qt.DisplayRole:
            # internalPointer is None for top-level rows, else the parent key.
            key = index.internalPointer()
            if key is None:
                if index.column() > 0:
                    return None
                return self._keys[index.row()]
            return self._data[key][index.row()][index.column()]
        elif role == Qt.ForegroundRole:
            # Render group references (leading "@") in dark blue.
            data = index.data()
            if isinstance(data, str) and data.startswith("@"):
                return QColor(5, 5, 96)
        return None

    def index(self, row, column, METHOD_NAME):
        if not self.hasIndex(row, column, METHOD_NAME):
            return QModelIndex()
        if METHOD_NAME.isValid():
            # Child row: stash the parent key as the internal pointer.
            key = self._keys[METHOD_NAME.row()]
            return self.createIndex(row, column, key)
        return self.createIndex(row, column, None)

    def METHOD_NAME(self, index):
        # Parent of a child row is the top-level row holding its key;
        # top-level rows have no parent.
        if not index.isValid():
            return QModelIndex()
        key = index.internalPointer()
        if key is None:
            return QModelIndex()
        row = self._keys.index(key)
        return self.createIndex(row, 0, None)

    def rowCount(self, METHOD_NAME=QModelIndex()):
        if not METHOD_NAME.isValid():
            return len(self._data)
        key = METHOD_NAME.internalPointer()
        if key is not None:
            # child item
            return 0
        key = self._keys[METHOD_NAME.row()]
        return len(self._data[key])
class KerningWindow(QWidget):
    """Top-level window listing a font's kerning pairs in a two-level tree view."""

    def __init__(self, font, METHOD_NAME=None):
        super().__init__(METHOD_NAME, Qt.Window)
        self._font = font
        # Refresh the view on kerning edits and the title on font-info edits.
        self._font.kerning.addObserver(self, "_kerningChanged", "Kerning.Changed")
        self._font.info.addObserver(self, "_fontInfoChanged", "Info.Changed")
        self.kerningView = QTreeView(self)
        self.kerningView.setModel(KerningDictModel(font.kerning, self.kerningView))
        self.kerningView.expandAll()
        # Size the value column to roughly eight digit widths.
        metrics = self.kerningView.fontMetrics()
        self.kerningView.setColumnWidth(1, 8 * metrics.width("0"))
        hdr = self.kerningView.header()
        hdr.setStretchLastSection(False)
        hdr.setSectionResizeMode(0, hdr.Stretch)
        hdr.hide()
        layout = QVBoxLayout(self)
        layout.addWidget(self.kerningView)
        layout.setContentsMargins(0, 0, 0, 0)
        self.updateWindowTitle(font=font)
        self.readSettings()

    def readSettings(self):
        # Restore the window geometry persisted by writeSettings().
        geometry = settings.kerningWindowGeometry()
        if geometry:
            self.restoreGeometry(geometry)

    def writeSettings(self):
        settings.setKerningWindowGeometry(self.saveGeometry())

    def updateWindowTitle(self, title=None, font=None):
        # Title becomes "Kerning – Family Style" when a font is supplied.
        if title is None:
            title = self.tr("Kerning")
        if font is not None:
            title = "{} – {} {}".format(
                title, font.info.familyName, font.info.styleName
            )
        self.setWindowTitle(title)

    # -------------
    # Notifications
    # -------------

    def _kerningChanged(self, notification):
        model = self.kerningView.model()
        model.setupModelData(self._font.kerning)

    def _fontInfoChanged(self, notification):
        self.updateWindowTitle(font=self._font)

    # ----------
    # Qt methods
    # ----------

    def sizeHint(self):
        return QSize(280, 460)

    def moveEvent(self, event):
        self.writeSettings()

    # Persist geometry on resize exactly as on move.
    resizeEvent = moveEvent

    def closeEvent(self, event):
        super().closeEvent(event)
        if event.isAccepted():
            # Detach observers so the closed window is garbage-collectable.
            self._font.kerning.removeObserver(self, "Kerning.Changed")
            self._font.info.removeObserver(self, "Info.Changed")
298,540 | test degenerate | try:
from . import generic as g
except BaseException:
import generic as g
class TrianglesTest(g.unittest.TestCase):
    """Unit tests for trimesh.triangles helpers."""

    def test_barycentric(self):
        for m in g.get_meshes(4):
            # a simple test which gets the barycentric coordinate at each of the three
            # vertices, checks to make sure the barycentric is [1,0,0] for the vertex
            # and then converts back to cartesian and makes sure the original points
            # are the same as the conversion and back
            for method in ['cross', 'cramer']:
                for i in range(3):
                    barycentric = g.trimesh.triangles.points_to_barycentric(
                        m.triangles, m.triangles[:, i], method=method)
                    assert (g.np.abs(barycentric -
                                     g.np.roll([1.0, 0, 0], i)) < 1e-8).all()

                    points = g.trimesh.triangles.barycentric_to_points(
                        m.triangles, barycentric)
                    assert (g.np.abs(points - m.triangles[:, i]) < 1e-8).all()

    def test_closest(self):
        closest = g.trimesh.triangles.closest_point(
            triangles=g.data['triangles']['triangles'],
            points=g.data['triangles']['points'])
        # BUGFIX: the original reduced the raw differences with `.all()`
        # *before* applying the tolerance, which made the assertion vacuous.
        # Compare the element-wise absolute error against the tolerance.
        assert (g.np.abs(closest - g.data['triangles']['closest']) < 1e-8).all()
        g.log.info('finished closest check on %d triangles', len(closest))

    def test_closest_obtuse(self):
        # simple triangle in the xy-plane with an obtuse corner at vertex A
        ABC = g.np.float32([[0, 0, 0], [2, 0, 0], [-2, 1, 0]])
        D = g.np.float32([1, -1, 0])
        # ground truth: closest point from D is the center of the AB edge:
        # (1,0,0)
        gt_closest = g.np.float32([1, 0, 0])
        tm_closest = g.trimesh.triangles.closest_point([ABC], [D])[0]
        assert g.np.linalg.norm(gt_closest - tm_closest) < g.tol.merge

        # create a circle of points around the triangle
        # with a radius so that all points are outside of the triangle
        radius = 3
        nPtsOnCircle = 100
        alphas = g.np.linspace(
            g.np.pi / nPtsOnCircle,
            g.np.pi * 2 - g.np.pi / nPtsOnCircle,
            nPtsOnCircle)
        ptsOnCircle = g.np.transpose(
            [g.np.cos(alphas), g.np.sin(alphas), g.np.zeros(nPtsOnCircle)]) * radius

        def norm(v):
            # Euclidean norm along the last axis.
            return g.np.sqrt(g.np.einsum('...i,...i', v, v))

        def distToLine(o, v, p):
            # distance from point p to the line through o with unit direction v
            return norm((o - p) - g.np.dot(o - p, v) * v)

        def distPointToEdge(U, V, P):  # edge [U, V], point P
            UtoV = V - U
            UtoP = P - U
            VtoP = P - V
            if g.np.dot(UtoV, UtoP) <= 0:
                # P is 'behind' U
                return norm(UtoP)
            elif g.np.dot(-UtoV, VtoP) <= 0:
                # P is 'behind' V
                return norm(VtoP)
            else:
                # P is 'between' U and V
                return distToLine(U, UtoV / norm(UtoV), P)

        # get closest points from trimesh and compute distances to the circle
        # points
        tm_dists = norm(
            ptsOnCircle -
            g.trimesh.triangles.closest_point(
                [ABC] *
                nPtsOnCircle,
                ptsOnCircle))
        # compute naive point-to-edge distances for all points and take the min of
        # the three edges
        gt_dists = g.np.float32([[distPointToEdge(ABC[i], ABC[(i + 1) % 3], pt)
                                  for i in range(3)] for pt in ptsOnCircle]).min(axis=1)
        diff_dists = tm_dists - gt_dists
        assert g.np.dot(diff_dists, diff_dists) < g.tol.merge

    def METHOD_NAME(self):
        # Degenerate triangles: duplicate vertices or collinear points.
        tri = [[[0, 0, 0],
                [1, 0, 0],
                [-.5, 0, 0]],
               [[0, 0, 0],
                [0, 0, 0],
                [10, 10, 0]],
               [[0, 0, 0],
                [0, 0, 2],
                [0, 0, 2.2]],
               [[0, 0, 0],
                [1, 0, 0],
                [0, 1, 0]]]
        # Only the last triangle is non-degenerate.
        tri_gt = [False,
                  False,
                  False,
                  True]
        r = g.trimesh.triangles.nondegenerate(tri)
        assert len(r) == len(tri)
        assert (r == tri_gt).all()

    def test_angles(self):
        # a zero- area triangle
        tris = g.np.array(
            [[[0, 0, 0], [1, 0, 0], [1, 0, 0]]], dtype=g.np.float64)
        angles = g.trimesh.triangles.angles(tris)
        # degenerate angles should be zero, not NaN
        g.log.debug(angles)
        assert g.np.allclose(angles, 0.0)

        # an equilateral triangle
        tris = g.np.array(
            [[[-1, 0, 0],
              [1, 0, 0],
              [0, g.np.sqrt(3), 0]]], dtype=g.np.float64)
        angles = g.trimesh.triangles.angles(tris)
        # all angles in an equilateral triangle are 60 degrees
        assert g.np.allclose(angles, g.np.radians(60))

        # an equilateral triangle transformed into space
        tris = g.trimesh.transform_points(
            g.np.array(
                [[-1, 0, 0],
                 [1, 0, 0],
                 [0, g.np.sqrt(3), 0]], dtype=g.np.float64),
            g.trimesh.transformations.random_rotation_matrix()).reshape((-1, 3, 3))
        angles = g.trimesh.triangles.angles(tris)
        # all angles should be 60 degrees
        assert g.np.allclose(angles, g.np.radians(60))

        # an 3-4-5 right triangle
        tris = g.trimesh.transform_points(
            g.np.array(
                [[0, 0, 0],
                 [3, 0, 0],
                 [0, 4, 0]], dtype=g.np.float64),
            g.trimesh.transformations.random_rotation_matrix()).reshape((-1, 3, 3))
        # get angles
        angles = g.trimesh.triangles.angles(tris)
        # make sure they match a 3-4-5
        assert g.np.allclose(
            g.np.sort(angles.ravel()),
            [g.np.arcsin(3.0 / 5), g.np.arcsin(4.0 / 5), g.np.pi / 2])
if __name__ == '__main__':
    # Attach trimesh's log handler so test output includes library logging.
    g.trimesh.util.attach_to_log()
    g.unittest.main()
298,541 | predict | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/nwojke/deep_sort/blob/master/deep_sort/track.py
"""
import datetime
__all__ = ['TrackState', 'Track']
class TrackState(object):
    """States of a single tracked target.

    A new track starts as ``Tentative`` until enough supporting detections
    are collected, after which it becomes ``Confirmed``. Tracks that are no
    longer alive are marked ``Deleted`` so they can be removed from the set
    of active tracks.
    """

    Tentative = 1
    Confirmed = 2
    Deleted = 3
class Track(object):
    """A single tracked target.

    The state space is ``(x, y, a, h)`` plus velocities, where ``(x, y)`` is
    the box center, ``a`` the aspect ratio and ``h`` the height.

    Args:
        mean (ndarray): Mean vector of the initial state distribution.
        covariance (ndarray): Covariance matrix of the initial state distribution.
        track_id (int): A unique track identifier.
        n_init (int): Consecutive detections needed before confirmation; a
            miss within the first `n_init` frames deletes the track.
        max_age (int): Maximum consecutive misses before deletion.
        cls_id (int): Category id of the tracked box.
        score (float): Confidence score of the tracked box.
        feature (Optional[ndarray]): Appearance feature of the originating
            detection; cached in `features` when given.

    Attributes:
        hits (int): Number of measurement updates so far.
        age (int): Frames since first occurrence.
        time_since_update (int): Frames since the last measurement update.
        state (TrackState): Current track state.
        features (List[ndarray]): Cache of appearance features, one appended
            per measurement update.
    """

    def __init__(self,
                 mean,
                 covariance,
                 track_id,
                 n_init,
                 max_age,
                 cls_id,
                 score,
                 feature=None):
        self.mean = mean
        self.covariance = covariance
        self.track_id = track_id
        self.hits = 1
        self.age = 1
        self.time_since_update = 0
        self.cls_id = cls_id
        self.score = score
        self.start_time = datetime.datetime.now()
        self.state = TrackState.Tentative
        self.feat = feature
        self.features = [] if feature is None else [feature]
        self._n_init = n_init
        self._max_age = max_age

    def to_tlwh(self):
        """Get position in format `(top left x, top left y, width, height)`."""
        box = self.mean[:4].copy()
        box[2] = box[2] * box[3]        # aspect ratio -> width
        box[:2] = box[:2] - box[2:] / 2  # center -> top-left corner
        return box

    def to_tlbr(self):
        """Get position in bounding box format `(min x, miny, max x, max y)`."""
        box = self.to_tlwh()
        box[2:] = box[2:] + box[:2]  # width/height -> bottom-right corner
        return box

    def METHOD_NAME(self, kalman_filter):
        """
        Propagate the state distribution to the current time step using a Kalman
        filter prediction step.
        """
        self.mean, self.covariance = kalman_filter.METHOD_NAME(self.mean,
                                                               self.covariance)
        self.age += 1
        self.time_since_update += 1

    def update(self, kalman_filter, detection):
        """
        Perform Kalman filter measurement update step and update the associated
        detection feature cache.
        """
        self.mean, self.covariance = kalman_filter.update(
            self.mean, self.covariance, detection.to_xyah())
        self.features.append(detection.feature)
        self.feat = detection.feature
        self.cls_id = detection.cls_id
        self.score = detection.score
        self.hits += 1
        self.time_since_update = 0
        # Promote a tentative track once enough detections accumulated.
        if self.state == TrackState.Tentative and self.hits >= self._n_init:
            self.state = TrackState.Confirmed

    def mark_missed(self):
        """Mark this track as missed (no association at the current time step).
        """
        # Tentative tracks die on the first miss; confirmed tracks only
        # after exceeding the miss budget.
        if self.state == TrackState.Tentative or self.time_since_update > self._max_age:
            self.state = TrackState.Deleted

    def is_tentative(self):
        """Returns True if this track is tentative (unconfirmed)."""
        return self.state == TrackState.Tentative

    def is_confirmed(self):
        """Returns True if this track is confirmed."""
        return self.state == TrackState.Confirmed

    def is_deleted(self):
        """Returns True if this track is dead and should be deleted."""
        return self.state == TrackState.Deleted
298,542 | test stl ipv6 scan6 | #!/usr/bin/python
from .stl_general_test import CStlGeneral_Test, CTRexScenario
from trex_stl_lib.api import *
class STLIPv6_Test(CStlGeneral_Test):
    """Tests for IPv6 scan6/ping_ip """

    def setUp(self):
        CStlGeneral_Test.setUp(self)
        if self.is_vdev:
            self.skip("We don't know what to expect with vdev.")
        print('')
        # scan6/ping require the server to run in service mode.
        self.stl_trex.reset()
        self.stl_trex.set_service_mode()

    def tearDown(self):
        CStlGeneral_Test.tearDown(self)
        self.stl_trex.set_service_mode(enabled = False)

    def conf_ipv6(self, tx_enabled, rx_enabled, tx_src = None, rx_src = None):
        # Configure IPv6 on both sides of the first bi-directional port pair
        # and return the (tx, rx) port ids.
        tx, rx = CTRexScenario.ports_map['bi'][0]
        self.stl_trex.conf_ipv6(tx, tx_enabled, tx_src)
        self.stl_trex.conf_ipv6(rx, rx_enabled, rx_src)
        return tx, rx

    def filter_ping_results(self, results):
        # Keep only successful replies (comprehension instead of
        # list(filter(lambda ...)) for readability).
        return [result for result in results if result['status'] == 'success']

    def test_stl_ipv6_ping(self):
        ping_count = 5
        expected_replies = 4 # allow one loss
        results = self.stl_trex.ping_ip(src_port = 0, dst_ip = 'ff02::1', count = ping_count)
        good_replies = len(self.filter_ping_results(results))
        if self.is_loopback:
            # negative test, loopback
            if good_replies > 0:
                self.fail('We should not respond to IPv6 in loopback at this stage, bug!\nOutput: %s' % results)
            else:
                print('No IPv6 replies in loopback as expected.')
        else:
            # positive test, DUT
            if good_replies < expected_replies:
                self.fail('Got only %s good replies out of %s.\nOutput: %s' % (good_replies, ping_count, results))
            else:
                print('Got replies from DUT as expected.')

        # negative test, non-existing IP
        dst_ip = '1234::1234'
        results = self.stl_trex.ping_ip(src_port = 0, dst_ip = dst_ip, count = 2)
        good_replies = len(self.filter_ping_results(results))
        if good_replies > 0:
            self.fail('We have answers from non-existing IPv6 %s, bug!\nOutput: %s' % (dst_ip, results))
        else:
            print('Got no replies from non-existing IPv6 %s as expected.' % dst_ip)

    def test_ipv6_ping_linux_based_stack(self):
        '''Testing Linux-based stack ping in different scenarios'''
        if not (self.is_linux_stack and self.is_loopback):
            self.skip('Relevant only for Linux-based stack in loopback')
        rx_src = '1111::1112'
        try:
            # Replies are expected exactly when the RX side has IPv6 enabled,
            # regardless of TX enablement or explicit TX source address.
            for tx_enabled in (True, False):
                for rx_enabled in (True, False):
                    for tx_src in ('1111::1111', None):
                        print('tx_enabled: %5s, rx_enabled: %5s, tx_src: %10s' % (tx_enabled, rx_enabled, tx_src))
                        tx, rx = self.conf_ipv6(tx_enabled, rx_enabled, tx_src, rx_src)
                        results = self.stl_trex.ping_ip(src_port = tx, dst_ip = rx_src, count = 2)
                        replies = self.filter_ping_results(results)
                        if rx_enabled:
                            assert replies, 'Got no replies while RX port has IPv6 enabled'
                        else:
                            assert not replies, 'Got replies while RX port has IPv6 disabled: %s' % replies
        finally:
            self.conf_ipv6(False, False)

    def METHOD_NAME(self):
        results = self.stl_trex.scan6(ports = 0)
        if self.is_loopback:
            # negative test, loopback
            if results[0]:
                self.fail("Scan6 found devices in loopback, we don't answer to IPv6 now, bug!\nOutput: %s" % results)
            else:
                print('No devices found in loopback as expected.')
        else:
            # positive test, DUT
            if len(results[0]) > 1:
                self.fail('Found more than one device at port 0: %s' % results)
            elif len(results[0]) == 1:
                print('Found one device as expected:\n{type:10} - {mac:20} - {ipv6}'.format(**results[0][0]))
            else:
                self.fail('Did not find IPv6 devices.')
|
298,543 | add summary | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generally useful utility functions."""
from __future__ import print_function

import codecs
import collections
import collections.abc
import json
import math
import os
import sys
import time
from distutils import version

import numpy as np
import tensorflow as tf
def check_tensorflow_version():
    """Fail fast when the installed TensorFlow is older than the minimum supported."""
    min_tf_version = "1.4.0-dev20171024"
    installed = version.LooseVersion(tf.__version__)
    if installed < version.LooseVersion(min_tf_version):
        raise EnvironmentError("Tensorflow version must >= %s" % min_tf_version)
def safe_exp(value):
    """Exponentiation with catching of overflow error.

    Returns ``float("inf")`` instead of raising when math.exp overflows.
    """
    try:
        return math.exp(value)
    except OverflowError:
        return float("inf")
def print_time(s, start_time):
    """Print *s* plus the elapsed seconds since *start_time*; return a fresh time."""
    elapsed = time.time() - start_time
    print("%s, time %ds, %s." % (s, elapsed, time.ctime()))
    sys.stdout.flush()
    return time.time()
def print_out(s, f=None, new_line=True):
    """Print *s* to stdout (flushing); optionally mirror it into binary file *f*."""
    text = s.decode("utf-8") if isinstance(s, bytes) else s

    # Mirror the message into the provided (binary-mode) file handle.
    if f:
        f.write(text.encode("utf-8"))
        if new_line:
            f.write(b"\n")

    # Echo to stdout. The encode/decode round-trip is kept from the original
    # py2-compatible code; on py3 the isinstance branch always fires.
    encoded = text.encode("utf-8")
    if not isinstance(encoded, str):
        encoded = encoded.decode("utf-8")
    print(encoded, end="", file=sys.stdout)

    if new_line:
        sys.stdout.write("\n")
    sys.stdout.flush()
def print_hparams(hparams, skip_patterns=None, header=None):
    """Print hparams sorted by key, skipping keys that match any pattern."""
    if header: print_out("%s" % header)
    values = hparams.values()
    for key in sorted(values.keys()):
        # Skip a key when any skip pattern occurs in it (De Morgan of the
        # original "no patterns or none match" print condition).
        if skip_patterns and any(pattern in key for pattern in skip_patterns):
            continue
        print_out(" %s=%s" % (key, str(values[key])))
def load_hparams(model_dir):
    """Load hparams from an existing model directory; None when absent or corrupt."""
    hparams_file = os.path.join(model_dir, "hparams")
    if not tf.gfile.Exists(hparams_file):
        return None
    print_out("# Loading hparams from %s" % hparams_file)
    with codecs.getreader("utf-8")(tf.gfile.GFile(hparams_file, "rb")) as f:
        try:
            hparams_values = json.load(f)
            hparams = tf.contrib.training.HParams(**hparams_values)
        except ValueError:
            print_out(" can't load hparams file")
            return None
    return hparams
def maybe_parse_standard_hparams(hparams, hparams_path):
    """Overlay values from a standard hparams JSON file when one is given and exists."""
    if not hparams_path or not tf.gfile.Exists(hparams_path):
        return hparams
    print_out("# Loading standard hparams from %s" % hparams_path)
    with codecs.getreader("utf-8")(tf.gfile.GFile(hparams_path, "rb")) as f:
        hparams.parse_json(f.read())
    return hparams
def save_hparams(out_dir, hparams):
    """Serialize hparams as pretty-printed JSON under *out_dir*."""
    path = os.path.join(out_dir, "hparams")
    print_out(" saving hparams to %s" % path)
    with codecs.getwriter("utf-8")(tf.gfile.GFile(path, "wb")) as f:
        f.write(hparams.to_json(indent=4, sort_keys=True))
def debug_tensor(s, msg=None, summarize=10):
    """Print the shape and value of a tensor at test time. Return a new tensor."""
    # Fall back to the tensor's own name when no message is given.
    label = (msg or s.name) + " "
    return tf.Print(s, [tf.shape(s), s], label, summarize=summarize)
def METHOD_NAME(summary_writer, global_step, tag, value):
    """Add a new summary to the current summary_writer.

    Useful to log things that are not part of the training graph, e.g., tag=BLEU.
    """
    scalar = tf.Summary.Value(tag=tag, simple_value=value)
    summary_writer.METHOD_NAME(tf.Summary(value=[scalar]), global_step)
def get_config_proto(log_device_placement=False, allow_soft_placement=True,
                     num_intra_threads=0, num_inter_threads=0):
    """Build a tf.ConfigProto with GPU memory growth and optional CPU thread caps."""
    # GPU options:
    # https://www.tensorflow.org/versions/r0.10/how_tos/using_gpu/index.html
    proto = tf.ConfigProto(
        log_device_placement=log_device_placement,
        allow_soft_placement=allow_soft_placement)
    # Grow GPU memory on demand rather than grabbing it all up front.
    proto.gpu_options.allow_growth = True
    # CPU threads options (0 keeps TensorFlow's defaults).
    if num_intra_threads:
        proto.intra_op_parallelism_threads = num_intra_threads
    if num_inter_threads:
        proto.inter_op_parallelism_threads = num_inter_threads
    return proto
def format_text(words):
    """Convert a sequence of byte tokens into a b" "-joined sentence.

    A single non-sequence value is wrapped in a list first.
    """
    # `collections.Iterable` was removed in Python 3.10; the ABC lives in
    # collections.abc.
    if (not hasattr(words, "__len__") and  # for numpy array
            not isinstance(words, collections.abc.Iterable)):
        words = [words]
    return b" ".join(words)
def format_bpe_text(symbols, delimiter=b"@@"):
    """Merge BPE pieces back into words.

    A piece ending in *delimiter* continues the current word; any other
    piece terminates it.
    """
    if isinstance(symbols, str):
        symbols = symbols.encode()
    words = []
    current = b""
    dlen = len(delimiter)
    for piece in symbols:
        if len(piece) >= dlen and piece[-dlen:] == delimiter:
            # continuation piece: strip the marker and keep accumulating
            current += piece[:-dlen]
        else:
            # word-final piece
            words.append(current + piece)
            current = b""
    return b" ".join(words)
def format_spm_text(symbols):
    """Decode a text in SPM (https://github.com/google/sentencepiece) format."""
    # Drop inter-token spaces, then map U+2581 markers back to real spaces.
    collapsed = u"".join(format_text(symbols).decode("utf-8").split())
    return collapsed.replace(u"\u2581", u" ").strip().encode("utf-8")
298,544 | make contiguous gpu | import logging
import numpy as np
from edt import edt
from porespy.tools import get_tqdm, get_border
from porespy.tools import Results
__all__ = [
"ibip_gpu",
]
tqdm = get_tqdm()
logger = logging.getLogger(__name__)
def ibip_gpu(im, dt=None, inlets=None, maxiter=10000):  # pragma: no cover
    """
    Performs invasion percolation on given image using iterative image
    dilation on GPU.

    Parameters
    ----------
    im : array_like
        Boolean array with ``True`` values indicating void voxels. If a
        standard numpy array is passed, it is converted to a cupy array.
    dt : array_like, optional
        The distance transform of ``im``. If a standard numpy array is
        passed, it is converted to a cupy array.
    inlets : array_like, optional
        Boolean array with ``True`` values indicating where the invading
        fluid is injected from. If ``None``, all faces will be used.
        If a standard numpy array is passed, it is converted to a cupy
        array.
    maxiter : int, optional
        The number of steps to apply before stopping. The default is to
        run for 10,000 steps which is almost certain to reach completion
        if the image is smaller than about 250-cubed.

    Returns
    -------
    results : Results object
        A custom object with the following two arrays as attributes:

        'inv_sequence'
            An ndarray the same shape as ``im`` with each voxel labelled by
            the sequence at which it was invaded.

        'inv_size'
            An ndarray the same shape as ``im`` with each voxel labelled by
            the ``inv_size`` at which was filled.

    """
    import cupy as cp
    from cupyx.scipy import ndimage as cndi

    im_gpu = cp.array(im)
    # Distance transform is computed on the CPU (edt has no GPU path here).
    dt = edt(cp.asnumpy(im)) if dt is None else dt
    dt_gpu = cp.array(dt)
    inlets = get_border(shape=im.shape) if inlets is None else inlets
    inlets_gpu = cp.array(inlets)
    bd_gpu = cp.copy(inlets_gpu > 0)
    dt_gpu = dt_gpu.astype(int)
    # Alternative to _ibip
    # Solid voxels start at -1; void voxels at 0 (not yet invaded).
    inv_gpu = -1*((~im_gpu).astype(int))
    sizes_gpu = -1*((~im_gpu).astype(int))
    # Use a sphere in 3D and a disk in 2D as the dilation structuring element.
    strel_gpu = ball_gpu if im_gpu.ndim == 3 else disk_gpu
    for step in tqdm(range(1, maxiter)):
        # Grow the invading boundary by one voxel and keep only void voxels.
        temp_gpu = cndi.binary_dilation(input=bd_gpu,
                                        structure=strel_gpu(1, smooth=False))
        edge_gpu = temp_gpu * (dt_gpu > 0)
        if ~cp.any(edge_gpu):
            logger.info('No more accessible invasion sites found')
            break
        # Find the maximum value of the dt underlaying the new edge
        r_max_gpu = dt_gpu[edge_gpu].max()
        # Find all values of the dt with that size
        dt_thresh_gpu = dt_gpu >= r_max_gpu
        # Insert the disk/sphere
        pt_gpu = cp.where(edge_gpu * dt_thresh_gpu)  # will be used later in updating bd
        # Update inv image
        bi_dial_gpu = cndi.binary_dilation(input=edge_gpu*dt_thresh_gpu,
                                           structure=strel_gpu(r_max_gpu.item()))
        bi_dial_step_gpu = bi_dial_gpu * step
        inv_prev_gpu = cp.copy(inv_gpu)
        # Only label voxels not already invaded in a previous step.
        mask_inv_prev_gpu = ~(inv_prev_gpu > 0)
        dial_single_gpu = mask_inv_prev_gpu * bi_dial_step_gpu
        inv_gpu = inv_prev_gpu + dial_single_gpu
        # Update size image
        bi_dial_size_gpu = bi_dial_gpu * r_max_gpu
        sizes_prev_gpu = cp.copy(sizes_gpu)
        mask_sizes_prev_gpu = ~(sizes_prev_gpu > 0)
        dial_single_size_gpu = mask_sizes_prev_gpu * bi_dial_size_gpu
        sizes_gpu = sizes_prev_gpu + dial_single_size_gpu
        # Update boundary image with newly invaded points
        bd_gpu[pt_gpu] = True
        dt_gpu[pt_gpu] = 0
        if step == (maxiter - 1):  # If max_iters reached, end loop
            logger.info('Maximum number of iterations reached')
            break
    # Relabel: never-invaded void -> -1, solid -> 0, then compact the
    # invasion step labels into a contiguous sequence.
    temp_gpu = inv_gpu == 0
    inv_gpu[~im_gpu] = 0
    inv_gpu[temp_gpu] = -1
    inv_seq_gpu = METHOD_NAME(im=inv_gpu)
    temp_gpu = sizes_gpu == 0
    sizes_gpu[~im_gpu] = 0
    sizes_gpu[temp_gpu] = -1
    inv_sequence = cp.asnumpy(inv_seq_gpu)
    inv_size = cp.asnumpy(sizes_gpu)
    results = Results()
    results.inv_sequence = inv_sequence
    results.inv_size = inv_size
    return results
def rankdata_gpu(im_arr):  # pragma: no cover
    """
    GPU alternative to scipy's rankdata using 'dense' method.
    Assign ranks to data, dealing with ties appropriately.

    Parameters
    ----------
    im_arr : cupy ndarray
        Input image.

    Returns
    -------
    dense : cupy ndarray
        An array of length equal to the size of im_arr, containing rank
        scores.

    """
    import cupy as cp
    flat = cp.ravel(im_arr)
    order = cp.argsort(flat)
    # Invert the sort permutation so ranks can be scattered back.
    inverse = cp.empty(order.size, dtype=cp.intp)
    inverse[order] = cp.arange(order.size, dtype=cp.intp)
    sorted_vals = flat[order]
    # True at the start of each run of equal values; cumsum yields dense ranks.
    new_value = cp.r_[True, sorted_vals[1:] != sorted_vals[:-1]]
    return new_value.cumsum()[inverse]
def METHOD_NAME(im):  # pragma: no cover
    """
    Take an image with arbitrary greyscale values and adjust them to
    ensure all values fall in a contiguous range starting at 0.

    Parameters
    ----------
    im : cupy ndarray
        Input array containing greyscale values

    Returns
    -------
    im_new : cupy ndarray
        Array the same size as ``im`` but with all values in contiguous
        order.

    """
    shape = im.shape
    im_flat = im.flatten()
    # Negative and positive values are ranked separately so negatives stay
    # negative and positives start at 1; zeros are left untouched.
    mask_neg = im_flat < 0
    im_neg = -rankdata_gpu(-im_flat[mask_neg])
    mask_pos = im_flat > 0
    im_pos = rankdata_gpu(im_flat[mask_pos])
    im_flat[mask_pos] = im_pos
    im_flat[mask_neg] = im_neg
    # NOTE(review): np.reshape is applied to what looks like a cupy array —
    # presumably dispatched via __array_function__; confirm this works on
    # the supported cupy versions.
    im_new = np.reshape(im_flat, shape)
    return im_new
def ball_gpu(radius, smooth=True):  # pragma: no cover
    """
    Generates a ball-shaped structuring element.

    Parameters
    ----------
    radius : int
        The radius of the ball-shaped structuring element.
    smooth : bool, optional
        Indicates whether the balls should include the nibs (``False``) on
        the surface or not (``True``). The default is ``True``.

    Returns
    -------
    cupy ndarray
        The structuring element where elements of the neighborhood
        are 1 and 0 otherwise.

    """
    import cupy as cp
    n = 2 * radius + 1
    Z, Y, X = cp.mgrid[-radius:radius:n * 1j,
                       -radius:radius:n * 1j,
                       -radius:radius:n * 1j]
    dist2 = X ** 2 + Y ** 2 + Z ** 2
    # Shrinking the radius slightly drops the single-voxel "nibs".
    r = radius - 0.001 if smooth else radius
    return dist2 <= r * r
def disk_gpu(radius, smooth=True):  # pragma: no cover
    """
    Generates a flat, disk-shaped structuring element.

    Parameters
    ----------
    radius : int
        The radius of the disk-shaped structuring element.
    smooth : bool, optional
        Indicates whether the disks should include the nibs (``False``) on
        the surface or not (``True``). The default is ``True``.

    Returns
    -------
    cupy ndarray
        The structuring element where elements of the neighborhood are
        1 and 0 otherwise.

    """
    import cupy as cp
    coords = cp.arange(-radius, radius + 1)
    X, Y = cp.meshgrid(coords, coords)
    # Shrinking the radius slightly drops the single-pixel "nibs".
    r = radius - 0.001 if smooth else radius
    return (X ** 2 + Y ** 2) <= r ** 2
if __name__ == '__main__':
    # Ad-hoc smoke test: invade a small random blob image on the GPU.
    import porespy as ps
    im = ps.generators.blobs(shape=[200, 200])
    out = ps.filters.ibip_gpu(im=im)
298,545 | export sources | from conan import ConanFile
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.files import (
apply_conandata_patches, collect_libs, copy, export_conandata_patches, get,
replace_in_file, rmdir
)
import os
required_conan_version = ">=1.53.0"
class CppRestSDKConan(ConanFile):
name = "cpprestsdk"
description = "A project for cloud-based client-server communication in native code using a modern asynchronous " \
"C++ API design"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/Microsoft/cpprestsdk"
topics = ("rest", "client", "http", "https")
license = "MIT"
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_websockets": [True, False],
"with_compression": [True, False],
"pplx_impl": ["win", "winpplx"],
"http_client_impl": ["winhttp", "asio"],
"http_listener_impl": ["httpsys", "asio"],
}
default_options = {
"shared": False,
"fPIC": True,
"with_websockets": True,
"with_compression": True,
"pplx_impl": "win",
"http_client_impl": "winhttp",
"http_listener_impl": "httpsys",
}
def METHOD_NAME(self):
    # Ship the conandata patches together with the exported recipe.
    export_conandata_patches(self)
def config_options(self):
    # fPIC is meaningless on Windows; the pplx/http implementation
    # selectors only exist on Windows.
    if self.settings.os == "Windows":
        del self.options.fPIC
    else:
        del self.options.pplx_impl
        del self.options.http_client_impl
        del self.options.http_listener_impl
def configure(self):
    # Shared builds always use position-independent code; drop fPIC.
    if self.options.shared:
        self.options.rm_safe("fPIC")
def layout(self):
    # Standard CMake layout with upstream sources under "src".
    cmake_layout(self, src_folder="src")
def requirements(self):
self.requires("boost/1.83.0")
self.requires("openssl/[>=1.1 <4]")
if self.options.with_compression:
self.requires("zlib/1.2.13")
if self.options.with_websockets:
self.requires("websocketpp/0.8.2")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
# upstream CMakeLists.txt sets BUILD_SHARED_LIBS as a CACHE variable
# TODO: remove if required_conan_version = ">=1.54.0"
tc.variables["BUILD_SHARED_LIBS"] = self.options.shared
tc.variables["BUILD_TESTS"] = False
tc.variables["BUILD_SAMPLES"] = False
tc.variables["WERROR"] = False
tc.variables["CPPREST_EXCLUDE_WEBSOCKETS"] = not self.options.with_websockets
tc.variables["CPPREST_EXCLUDE_COMPRESSION"] = not self.options.with_compression
if self.options.get_safe("pplx_impl"):
tc.variables["CPPREST_PPLX_IMPL"] = self.options.pplx_impl
if self.options.get_safe("http_client_impl"):
tc.variables["CPPREST_HTTP_CLIENT_IMPL"] = self.options.http_client_impl
if self.options.get_safe("http_listener_impl"):
tc.variables["CPPREST_HTTP_LISTENER_IMPL"] = self.options.http_listener_impl
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def _patch_clang_libcxx(self):
if self.settings.compiler == 'clang' and str(self.settings.compiler.libcxx) in ['libstdc++', 'libstdc++11']:
replace_in_file(self, os.path.join(self.source_folder, 'Release', 'CMakeLists.txt'),
'libc++', 'libstdc++')
def build(self):
apply_conandata_patches(self)
self._patch_clang_libcxx()
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, "license.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
rmdir(self, os.path.join(self.package_folder, "lib", "cpprestsdk"))
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "cpprestsdk")
# cpprestsdk_boost_internal
self.cpp_info.components["cpprestsdk_boost_internal"].set_property("cmake_target_name", "cpprestsdk::cpprestsdk_boost_internal")
self.cpp_info.components["cpprestsdk_boost_internal"].includedirs = []
self.cpp_info.components["cpprestsdk_boost_internal"].requires = ["boost::boost"]
# cpprestsdk_openssl_internal
self.cpp_info.components["cpprestsdk_openssl_internal"].set_property("cmake_target_name", "cpprestsdk::cpprestsdk_openssl_internal")
self.cpp_info.components["cpprestsdk_openssl_internal"].includedirs = []
self.cpp_info.components["cpprestsdk_openssl_internal"].requires = ["openssl::openssl"]
# cpprest
self.cpp_info.components["cpprest"].set_property("cmake_target_name", "cpprestsdk::cpprest")
self.cpp_info.components["cpprest"].libs = collect_libs(self)
self.cpp_info.components["cpprest"].requires = ["cpprestsdk_boost_internal", "cpprestsdk_openssl_internal"]
if self.settings.os == "Linux":
self.cpp_info.components["cpprest"].system_libs.append("pthread")
elif self.settings.os == "Windows":
if self.options.get_safe("http_client_impl") == "winhttp":
self.cpp_info.components["cpprest"].system_libs.append("winhttp")
if self.options.get_safe("http_listener_impl") == "httpsys":
self.cpp_info.components["cpprest"].system_libs.append("httpapi")
self.cpp_info.components["cpprest"].system_libs.append("bcrypt")
if self.options.get_safe("pplx_impl") == "winpplx":
self.cpp_info.components["cpprest"].defines.append("CPPREST_FORCE_PPLX=1")
if self.options.get_safe("http_client_impl") == "asio":
self.cpp_info.components["cpprest"].defines.append("CPPREST_FORCE_HTTP_CLIENT_ASIO")
if self.options.get_safe("http_listener_impl") == "asio":
self.cpp_info.components["cpprest"].defines.append("CPPREST_FORCE_HTTP_LISTENER_ASIO")
elif self.settings.os == "Macos":
self.cpp_info.components["cpprest"].frameworks.extend(["CoreFoundation", "Security"])
if not self.options.shared:
self.cpp_info.components["cpprest"].defines.extend(["_NO_ASYNCRTIMP", "_NO_PPLXIMP"])
# cpprestsdk_zlib_internal
if self.options.with_compression:
self.cpp_info.components["cpprestsdk_zlib_internal"].set_property("cmake_target_name", "cpprestsdk::cpprestsdk_zlib_internal")
self.cpp_info.components["cpprestsdk_zlib_internal"].includedirs = []
self.cpp_info.components["cpprestsdk_zlib_internal"].requires = ["zlib::zlib"]
self.cpp_info.components["cpprest"].requires.append("cpprestsdk_zlib_internal")
# cpprestsdk_websocketpp_internal
if self.options.with_websockets:
self.cpp_info.components["cpprestsdk_websocketpp_internal"].set_property("cmake_target_name", "cpprestsdk::cpprestsdk_websocketpp_internal")
self.cpp_info.components["cpprestsdk_websocketpp_internal"].includedirs = []
self.cpp_info.components["cpprestsdk_websocketpp_internal"].requires = ["websocketpp::websocketpp"]
self.cpp_info.components["cpprest"].requires.append("cpprestsdk_websocketpp_internal") |
298,546 | fix files | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """Split *iterator* into (matching, non-matching) lists, preserving order."""
    matching: List[Any] = []
    rest: List[Any] = []
    for item in iterator:
        (matching if predicate(item) else rest).append(item)
    # Returns trueList, falseList
    return matching, rest
class servicedirectoryCallTransformer(cst.CSTTransformer):
    """Rewrite flattened servicedirectory client calls into a single 'request' dict argument."""
    # Control-plane parameters that stay as real keyword arguments.
    CTRL_PARAMS: Tuple[str, ...] = ('retry', 'timeout', 'metadata')
    # Maps an API method name to its (ordered) flattened parameter names.
    METHOD_TO_PARAMS: Dict[str, Tuple[str, ...]] = {
        'create_endpoint': ('parent', 'endpoint_id', 'endpoint', ),
        'create_namespace': ('parent', 'namespace_id', 'namespace', ),
        'create_service': ('parent', 'service_id', 'service', ),
        'delete_endpoint': ('name', ),
        'delete_namespace': ('name', ),
        'delete_service': ('name', ),
        'get_endpoint': ('name', ),
        'get_iam_policy': ('resource', 'options', ),
        'get_namespace': ('name', ),
        'get_service': ('name', ),
        'list_endpoints': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ),
        'list_namespaces': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ),
        'list_services': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ),
        'resolve_service': ('name', 'max_endpoints', 'endpoint_filter', ),
        'set_iam_policy': ('resource', 'policy', 'update_mask', ),
        'test_iam_permissions': ('resource', 'permissions', ),
        'update_endpoint': ('endpoint', 'update_mask', ),
        'update_namespace': ('namespace', 'update_mask', ),
        'update_service': ('service', 'update_mask', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Repack positional/keyword args of a recognized API call into a request dict."""
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated

        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated

        # Separate API payload kwargs from control kwargs (retry/timeout/metadata).
        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs
        )

        # Extra positional args beyond the known parameters are control args.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))

        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )

        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def METHOD_NAME(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=None,
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory

    :param in_dir: directory tree to read ``.py`` files from.
    :param out_dir: directory to mirror the fixed files into.
    :param transformer: optional CST transformer; a fresh
        ``servicedirectoryCallTransformer`` is created per call by default.
    """
    # Fix: the default used to be ``servicedirectoryCallTransformer()`` in the
    # signature, which creates ONE shared instance at definition time (the
    # mutable-default-argument pitfall). Create it lazily per call instead.
    if transformer is None:
        transformer = servicedirectoryCallTransformer()

    # Lazily walk the tree for Python sources.
    pyfile_gen = (
        pathlib.Path(os.path.join(root, f))
        for root, _, files in os.walk(in_dir)
        for f in files if os.path.splitext(f)[1] == ".py"
    )

    for fpath in pyfile_gen:
        with open(fpath, 'r') as f:
            src = f.read()

        # Parse the code and insert method call fixes.
        tree = cst.parse_module(src)
        updated = tree.visit(transformer)

        # Create the path and directory structure for the new file.
        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
        updated_path.parent.mkdir(parents=True, exist_ok=True)

        # Generate the updated source file at the corresponding path.
        with open(updated_path, 'w') as f:
            f.write(updated.code)
# CLI entry point: validate the input/output directories, then run the fixer.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the servicedirectory client library.

The existing sources are NOT overwritten but are copied to output_dir with changes made.

Note: This tool operates at a best-effort level at converting positional
      parameters in client method calls to keyword based parameters.
      Cases where it WILL FAIL include
      A) * or ** expansion in a method call.
      B) Calls via function or method alias (includes free function calls)
      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)

      These all constitute false negatives. The tool will also detect false
      positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    # Both directories must exist, and the output must be empty so that we
    # never clobber user files.
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)

    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)

    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)

    METHOD_NAME(input_dir, output_dir)
298,547 | prepare request | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
    """Build the GET request for listing Microsoft.ManagedIdentity operations."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Caller-supplied api-version (header/param) wins over the default.
    api_version: Literal["2023-01-31"] = kwargs.pop("api_version", _params.pop("api-version", "2023-01-31"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop("template_url", "/providers/Microsoft.ManagedIdentity/operations")

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.msi.v2023_01_31.ManagedServiceIdentityClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Accepts (client, config, serializer, deserializer) positionally or
        # by keyword — autogenerated plumbing from the service client.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
        """Lists available operations for the Microsoft.ManagedIdentity provider.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Operation or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.msi.v2023_01_31.models.Operation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2023-01-31"] = kwargs.pop("api_version", _params.pop("api-version", "2023-01-31"))
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def METHOD_NAME(next_link=None):
            # Build the HTTP request for either the first page (no next_link)
            # or a continuation page.
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page; non-200 responses map to typed ARM errors.
            request = METHOD_NAME(next_link)
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list.metadata = {"url": "/providers/Microsoft.ManagedIdentity/operations"}
298,548 | validate tree name | """Custom field validators for InvenTree."""
import re
from decimal import Decimal, InvalidOperation
from django.conf import settings
from django.core import validators
from django.core.exceptions import FieldDoesNotExist, ValidationError
from django.utils.translation import gettext_lazy as _
import pint
from jinja2 import Template
from moneyed import CURRENCIES
import InvenTree.conversion
def validate_physical_units(unit):
    """Ensure that a given unit is a valid physical unit."""
    cleaned = unit.strip()

    # Blank units are always accepted
    if not cleaned:
        return

    registry = InvenTree.conversion.get_unit_registry()

    try:
        registry(cleaned)
    except (AttributeError, pint.errors.UndefinedUnitError):
        # Both failure modes map to the same user-facing error
        raise ValidationError(_('Invalid physical unit'))
def validate_currency_code(code):
    """Check that a given code is a valid currency code."""
    if code in CURRENCIES:
        return
    raise ValidationError(_('Not a valid currency code'))
def allowable_url_schemes():
    """Return the list of allowable URL schemes.

    In addition to the default schemes allowed by Django,
    the install configuration file (config.yaml) can specify
    extra schemas
    """
    # Default schemes
    schemes = ['http', 'https', 'ftp', 'ftps']

    # Fold in any configured extras, lower-cased and de-duplicated
    for extra_scheme in settings.EXTRA_URL_SCHEMES:
        lowered = extra_scheme.lower()
        if lowered not in schemes:
            schemes.append(lowered)

    return schemes
class AllowedURLValidator(validators.URLValidator):
    """Custom URL validator to allow for custom schemes."""

    def __call__(self, value):
        """Validate the URL."""
        # Recompute on every call so configuration changes take effect
        # without re-instantiating the validator.
        self.schemes = allowable_url_schemes()
        super().__call__(value)
def validate_purchase_order_reference(value):
    """Validate the 'reference' field of a PurchaseOrder."""
    # Imported locally to avoid a circular import with the order app.
    from order.models import PurchaseOrder

    # If we get to here, run the "default" validation routine
    PurchaseOrder.validate_reference_field(value)
def validate_sales_order_reference(value):
    """Validate the 'reference' field of a SalesOrder."""
    # Imported locally to avoid a circular import with the order app.
    from order.models import SalesOrder

    # If we get to here, run the "default" validation routine
    SalesOrder.validate_reference_field(value)
def METHOD_NAME(value):
    """Placeholder for legacy function used in migrations."""
    # Intentionally a no-op: kept only so historic migrations that reference
    # this validator by import path continue to load.
    ...
def validate_overage(value):
    """Validate that a BOM overage string is properly formatted.

    An overage string can look like:

    - An integer number ('1' / 3 / 4)
    - A decimal number ('0.123')
    - A percentage ('5%' / '10 %')
    """
    text = str(value).lower().strip()

    # Case 1: a plain numeric value (integer or decimal)
    try:
        quantity = Decimal(text)
    except (ValueError, InvalidOperation):
        pass
    else:
        if quantity < 0:
            raise ValidationError(_("Overage value must not be negative"))
        # Looks like a number
        return True

    # Case 2: a percentage value, e.g. '5%' or '10 %'
    if text.endswith('%'):
        try:
            percentage = float(text[:-1].strip())
        except ValueError:
            pass
        else:
            if percentage < 0:
                raise ValidationError(_("Overage value must not be negative"))
            elif percentage > 100:
                raise ValidationError(_("Overage must not exceed 100%"))
            return True

    raise ValidationError(
        _("Invalid value for overage")
    )
def validate_part_name_format(value):
    """Validate part name format.

    Make sure that each template container has a field of Part Model
    """
    # Make sure that the field_name exists in Part model
    from part.models import Part

    jinja_template_regex = re.compile('{{.*?}}')
    # Match the attribute name following 'part.' inside a template block.
    # Fix: previously '[A-z]+', a character-class bug — the A-z range also
    # matches '[', '\\', ']', '^' and '`'. '[A-Za-z_]+' keeps letters and
    # underscores (needed for fields such as 'default_location') only.
    field_name_regex = re.compile('(?<=part\\.)[A-Za-z_]+')

    for jinja_template in jinja_template_regex.findall(str(value)):
        # make sure at least one and only one field is present inside the parser
        field_names = field_name_regex.findall(jinja_template)
        if len(field_names) < 1:
            raise ValidationError({
                'value': 'At least one field must be present inside a jinja template container i.e {{}}'
            })

        for field_name in field_names:
            try:
                Part._meta.get_field(field_name)
            except FieldDoesNotExist:
                raise ValidationError({
                    'value': f'{field_name} does not exist in Part Model'
                })

    # Attempt to render the template with a dummy Part instance
    p = Part(name='test part', description='some test part')

    try:
        Template(value).render({'part': p})
    except Exception as exc:
        raise ValidationError({
            'value': str(exc)
        })

    return True
298,549 | test qualname | import os
import platform
import sys
from unittest.mock import Mock, patch
import pytest
from celery.utils.imports import (NotAPackage, cwd_in_path, find_module, gen_task_name, module_file, qualname,
reload_from_cwd)
def test_find_module():
    """find_module() resolves importable modules and raises NotAPackage when a parent is not a package."""
    def imp_side_effect(module):
        # Simulate a top-level 'foo' that imports (returns None) but whose
        # children cannot be resolved.
        if module == 'foo':
            return None
        else:
            raise ImportError(module)

    assert find_module('celery')
    imp = Mock()
    imp.side_effect = imp_side_effect
    with pytest.raises(NotAPackage) as exc_info:
        find_module('foo.bar.baz', imp=imp)
    # The offending (non-package) ancestor is reported as the first arg.
    assert exc_info.value.args[0] == 'foo'
    assert find_module('celery.worker.request')
def test_find_module_legacy_namespace_package(tmp_path, monkeypatch):
    """find_module() behaves correctly inside a pkgutil-style (legacy) namespace package."""
    monkeypatch.chdir(str(tmp_path))
    # Build pkg/ as a legacy namespace package with a regular subpackage.
    (tmp_path / 'pkg' / 'foo').mkdir(parents=True)
    (tmp_path / 'pkg' / '__init__.py').write_text(
        'from pkgutil import extend_path\n'
        '__path__ = extend_path(__path__, __name__)\n')
    (tmp_path / 'pkg' / 'foo' / '__init__.py').write_text('')
    (tmp_path / 'pkg' / 'foo' / 'bar.py').write_text('')
    with patch.dict(sys.modules):
        # Drop any cached 'pkg' modules so imports resolve from tmp_path.
        for modname in list(sys.modules):
            if modname == 'pkg' or modname.startswith('pkg.'):
                del sys.modules[modname]
        with pytest.raises(ImportError):
            find_module('pkg.missing')
        with pytest.raises(ImportError):
            find_module('pkg.foo.missing')
        assert find_module('pkg.foo.bar')
        # bar is a module, not a package, so it cannot contain children.
        with pytest.raises(NotAPackage) as exc_info:
            find_module('pkg.foo.bar.missing')
        assert exc_info.value.args[0] == 'pkg.foo.bar'
def test_find_module_pep420_namespace_package(tmp_path, monkeypatch):
    """find_module() behaves correctly inside a PEP 420 (implicit) namespace package."""
    monkeypatch.chdir(str(tmp_path))
    # pkg/ has no __init__.py, making it an implicit namespace package.
    (tmp_path / 'pkg' / 'foo').mkdir(parents=True)
    (tmp_path / 'pkg' / 'foo' / '__init__.py').write_text('')
    (tmp_path / 'pkg' / 'foo' / 'bar.py').write_text('')
    with patch.dict(sys.modules):
        # Drop any cached 'pkg' modules so imports resolve from tmp_path.
        for modname in list(sys.modules):
            if modname == 'pkg' or modname.startswith('pkg.'):
                del sys.modules[modname]
        with pytest.raises(ImportError):
            find_module('pkg.missing')
        with pytest.raises(ImportError):
            find_module('pkg.foo.missing')
        assert find_module('pkg.foo.bar')
        # bar is a module, not a package, so it cannot contain children.
        with pytest.raises(NotAPackage) as exc_info:
            find_module('pkg.foo.bar.missing')
        assert exc_info.value.args[0] == 'pkg.foo.bar'
def METHOD_NAME():
    """qualname() joins __module__ and the class name, for classes and instances alike."""
    fox_cls = type('Fox', (object,), {
        '__module__': 'quick.brown',
    })
    for candidate in (fox_cls, fox_cls()):
        assert qualname(candidate) == 'quick.brown.Fox'
def test_reload_from_cwd(patching):
    """reload_from_cwd() delegates to the module-level reload (patched here)."""
    reload = patching('celery.utils.imports.reload')
    reload_from_cwd('foo')
    reload.assert_called()
def test_reload_from_cwd_custom_reloader():
    """A caller-supplied reloader callable is invoked by reload_from_cwd()."""
    custom_reloader = Mock()
    reload_from_cwd('foo', custom_reloader)
    custom_reloader.assert_called()
def test_module_file():
    """module_file() maps a .pyc path back to .py and leaves .py paths unchanged."""
    m1 = Mock()
    m1.__file__ = '/opt/foo/xyz.pyc'
    assert module_file(m1) == '/opt/foo/xyz.py'
    m2 = Mock()
    m2.__file__ = '/opt/foo/xyz.py'
    # Fix: the original asserted module_file(m1) again here, so the plain-.py
    # case (m2) was constructed but never actually exercised.
    assert module_file(m2) == '/opt/foo/xyz.py'
def test_cwd_in_path(tmp_path, monkeypatch):
    """cwd_in_path() works both while the cwd exists and after it is removed."""
    now_cwd = os.getcwd()
    t = str(tmp_path) + "/foo"
    os.mkdir(t)
    os.chdir(t)
    with cwd_in_path():
        assert os.path.exists(t) is True
    if sys.platform == "win32" or "Windows" in platform.platform():
        # If it is a Windows server, other processes cannot delete the current working directory being used by celery
        # . If you want to delete it, you need to terminate the celery process. If it is a Linux server, the current
        # working directory of celery can be deleted by other processes.
        pass
    else:
        os.rmdir(t)
        with cwd_in_path():
            assert os.path.exists(t) is False
    # NOTE(review): restoring the cwd is not in a try/finally, so a failing
    # assertion above leaves the process chdir'd into tmp_path — consider
    # monkeypatch.chdir or a finally block.
    os.chdir(now_cwd)
class test_gen_task_name:
    """Tests for gen_task_name()."""

    def test_no_module(self):
        """A task defined without a module falls back to a usable generated name."""
        app = Mock()
        # Fix: the original line read ``app.name == '__main__'`` — a no-op
        # comparison where an assignment was intended, so the attribute was
        # never actually set on the mock.
        app.name = '__main__'
        assert gen_task_name(app, 'foo', 'axsadaewe')
298,550 | test l10n en us | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <dr.prodigy.github@gmail.com> (c) 2017-2023
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from holidays.countries.cyprus import Cyprus, CY, CYP
from tests.common import TestCase
class TestCyprus(TestCase):
    """Holiday tests for Cyprus: fixed dates, Orthodox-Easter-relative dates and localization."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass(Cyprus)

    def test_country_aliases(self):
        # CY and CYP must resolve to the same holiday class.
        self.assertCountryAliases(Cyprus, CY, CYP)

    def test_fixed_holidays(self):
        # Holidays that fall on the same calendar date every year.
        years = range(2000, 2025)
        for y in years:
            fdays = (
                (date(y, 1, 1), "Πρωτοχρονιά"),
                (date(y, 1, 6), "Θεοφάνεια"),
                (date(y, 3, 25), "Εικοστή Πέμπτη Μαρτίου"),
                (date(y, 4, 1), "1η Απριλίου"),
                (date(y, 5, 1), "Εργατική Πρωτομαγιά"),
                (date(y, 8, 15), "Κοίμηση της Θεοτόκου"),
                (date(y, 10, 28), "Ημέρα του Όχι"),
                (date(y, 12, 25), "Χριστούγεννα"),
                (date(y, 12, 26), "Δεύτερη μέρα Χριστουγέννων"),
            )

            for d, dstr in fdays:
                self.assertIn(d, self.holidays)
                self.assertIn(dstr, self.holidays[d])

    def test_cy_clean_monday(self):
        # Movable feast: 48 days before Orthodox Easter.
        checkdates = (
            date(2018, 2, 19),
            date(2019, 3, 11),
            date(2020, 3, 2),
            date(2021, 3, 15),
            date(2022, 3, 7),
            date(2023, 2, 27),
            date(2024, 3, 18),
        )

        for d in checkdates:
            self.assertIn(d, self.holidays)
            self.assertIn("Καθαρά Δευτέρα", self.holidays[d])

    def test_cy_good_friday(self):
        checkdates = (
            date(2018, 4, 6),
            date(2019, 4, 26),
            date(2020, 4, 17),
            date(2021, 4, 30),
            date(2022, 4, 22),
            date(2023, 4, 14),
            date(2024, 5, 3),
        )

        for d in checkdates:
            self.assertIn(d, self.holidays)
            self.assertIn("Μεγάλη Παρασκευή", self.holidays[d])

    def test_cy_easter_sunday(self):
        checkdates = (
            date(2018, 4, 8),
            date(2019, 4, 28),
            date(2020, 4, 19),
            date(2021, 5, 2),
            date(2022, 4, 24),
            date(2023, 4, 16),
            date(2024, 5, 5),
        )

        for d in checkdates:
            self.assertIn(d, self.holidays)
            self.assertIn("Κυριακή του Πάσχα", self.holidays[d])

    def test_cy_easter_monday(self):
        checkdates = (
            date(2018, 4, 9),
            date(2019, 4, 29),
            date(2020, 4, 20),
            date(2021, 5, 3),
            date(2022, 4, 25),
            date(2023, 4, 17),
            date(2024, 5, 6),
        )

        for d in checkdates:
            self.assertIn(d, self.holidays)
            self.assertIn("Δευτέρα του Πάσχα", self.holidays[d])

    def test_cy_monday_of_the_holy_spirit(self):
        # Movable feast: 50 days after Orthodox Easter (Whit Monday).
        checkdates = (
            date(2018, 5, 28),
            date(2019, 6, 17),
            date(2020, 6, 8),
            date(2021, 6, 21),
            date(2022, 6, 13),
            date(2023, 6, 5),
            date(2024, 6, 24),
        )

        for d in checkdates:
            self.assertIn(d, self.holidays)
            self.assertIn("Δευτέρα του Αγίου Πνεύματος", self.holidays[d])

    def test_l10n_default(self):
        # Default locale is Greek.
        self.assertLocalizedHolidays(
            ("2022-01-01", "Πρωτοχρονιά"),
            ("2022-01-06", "Θεοφάνεια"),
            ("2022-03-07", "Καθαρά Δευτέρα"),
            ("2022-03-25", "Εικοστή Πέμπτη Μαρτίου"),
            ("2022-04-01", "1η Απριλίου"),
            ("2022-04-22", "Μεγάλη Παρασκευή"),
            ("2022-04-24", "Κυριακή του Πάσχα"),
            ("2022-04-25", "Δευτέρα του Πάσχα"),
            ("2022-05-01", "Εργατική Πρωτομαγιά"),
            ("2022-06-13", "Δευτέρα του Αγίου Πνεύματος"),
            ("2022-08-15", "Κοίμηση της Θεοτόκου"),
            ("2022-10-01", "Ημέρα Ανεξαρτησίας της Κύπρου"),
            ("2022-10-28", "Ημέρα του Όχι"),
            ("2022-12-24", "Παραμονή Χριστουγέννων"),
            ("2022-12-25", "Χριστούγεννα"),
            ("2022-12-26", "Δεύτερη μέρα Χριστουγέννων"),
        )

    def METHOD_NAME(self):
        # en_US translations of the same 2022 holiday set.
        self.assertLocalizedHolidays(
            "en_US",
            ("2022-01-01", "New Year's Day"),
            ("2022-01-06", "Epiphany"),
            ("2022-03-07", "Clean Monday"),
            ("2022-03-25", "Greek Independence Day"),
            ("2022-04-01", "Cyprus National Day"),
            ("2022-04-22", "Good Friday"),
            ("2022-04-24", "Easter Sunday"),
            ("2022-04-25", "Easter Monday"),
            ("2022-05-01", "Labor Day"),
            ("2022-06-13", "Monday of the Holy Spirit"),
            ("2022-08-15", "Assumption of Mary"),
            ("2022-10-01", "Cyprus Independence Day"),
            ("2022-10-28", "Ochi Day"),
            ("2022-12-24", "Christmas Eve"),
            ("2022-12-25", "Christmas Day"),
            ("2022-12-26", "Day After Christmas"),
        )
298,551 | test watch forum | from kitsune.kbforums.events import NewPostEvent, NewThreadEvent
from kitsune.kbforums.models import Thread
from kitsune.kbforums.tests import KBForumTestCase, ThreadFactory
from kitsune.sumo.tests import get, post
from kitsune.users.tests import UserFactory, add_permission
from kitsune.wiki.tests import ApprovedRevisionFactory, DocumentFactory
class ThreadTests(KBForumTestCase):
    """Test thread views."""

    def METHOD_NAME(self):
        """Watch then unwatch a forum."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")

        d = ApprovedRevisionFactory().document
        post(self.client, "wiki.discuss.watch_forum", {"watch": "yes"}, args=[d.slug])
        assert NewThreadEvent.is_notifying(u, d)
        # NewPostEvent is not notifying.
        t = ThreadFactory(document=d)
        p = t.new_post(creator=t.creator, content="test")
        assert not NewPostEvent.is_notifying(u, p)

        post(self.client, "wiki.discuss.watch_forum", {"watch": "no"}, args=[d.slug])
        assert not NewThreadEvent.is_notifying(u, d)

    def test_watch_thread(self):
        """Watch then unwatch a thread."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")

        t = ThreadFactory()
        post(
            self.client,
            "wiki.discuss.watch_thread",
            {"watch": "yes"},
            args=[t.document.slug, t.id],
        )
        assert NewPostEvent.is_notifying(u, t)
        # NewThreadEvent is not notifying.
        assert not NewThreadEvent.is_notifying(u, t.document)

        post(
            self.client, "wiki.discuss.watch_thread", {"watch": "no"}, args=[t.document.slug, t.id]
        )
        assert not NewPostEvent.is_notifying(u, t)

    def test_edit_thread(self):
        """Changing thread title works."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")

        d = DocumentFactory()
        t = ThreadFactory(title="Sticky Thread", document=d, creator=u)
        post(
            self.client, "wiki.discuss.edit_thread", {"title": "A new title"}, args=[d.slug, t.id]
        )
        edited_t = d.thread_set.get(pk=t.id)

        # The in-memory object keeps the old title; the DB row was updated.
        self.assertEqual("Sticky Thread", t.title)
        self.assertEqual("A new title", edited_t.title)

    def test_edit_thread_moderator(self):
        """Editing post as a moderator works."""
        u = UserFactory()
        add_permission(u, Thread, "change_thread")
        t = ThreadFactory(title="Sticky Thread")
        d = t.document
        self.client.login(username=u.username, password="testpass")

        self.assertEqual("Sticky Thread", t.title)

        r = post(
            self.client, "wiki.discuss.edit_thread", {"title": "new title"}, args=[d.slug, t.id]
        )
        self.assertEqual(200, r.status_code)

        edited_t = Thread.objects.get(pk=t.id)
        self.assertEqual("new title", edited_t.title)

    def test_disallowed_404(self):
        """If document.allow_discussion is false, should return 404."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")

        doc = ApprovedRevisionFactory(document__allow_discussion=False).document

        def check(url):
            response = get(self.client, url, args=[doc.slug])
            st = response.status_code
            self.assertEqual(404, st, "%s was %s, not 404" % (url, st))

        check("wiki.discuss.threads")
        check("wiki.discuss.new_thread")
        check("wiki.discuss.threads.feed")
class ThreadPermissionsTests(KBForumTestCase):
    """Verify 403/405 responses for thread/post actions performed without permission.

    setUp creates a thread+post owned by one user, then logs in as a
    *different* user so every action below is unauthorized.
    """

    def setUp(self):
        # Modernized: zero-argument super() (was super(ThreadPermissionsTests, self)).
        super().setUp()
        self.doc = DocumentFactory()
        self.u = UserFactory()
        self.thread = ThreadFactory(document=self.doc, creator=self.u)
        self.post = self.thread.new_post(creator=self.thread.creator, content="foo")
        # Login for testing 403s
        u2 = UserFactory()
        self.client.login(username=u2.username, password="testpass")

    def tearDown(self):
        self.client.logout()
        super().tearDown()

    def test_edit_thread_403(self):
        """Editing a thread without permissions returns 403."""
        response = get(
            self.client, "wiki.discuss.edit_thread", args=[self.doc.slug, self.thread.id]
        )
        self.assertEqual(403, response.status_code)

    def test_edit_locked_thread_403(self):
        """Editing a locked thread returns 403."""
        t = ThreadFactory(document=self.doc, creator=self.u, is_locked=True)
        response = get(self.client, "wiki.discuss.edit_thread", args=[self.doc.slug, t.id])
        self.assertEqual(403, response.status_code)

    def test_delete_thread_403(self):
        """Deleting a thread without permissions returns 403."""
        response = get(
            self.client, "wiki.discuss.delete_thread", args=[self.doc.slug, self.thread.id]
        )
        self.assertEqual(403, response.status_code)

    def test_sticky_thread_405(self):
        """Marking a thread sticky with a HTTP GET returns 405."""
        response = get(
            self.client, "wiki.discuss.sticky_thread", args=[self.doc.slug, self.thread.id]
        )
        self.assertEqual(405, response.status_code)

    def test_sticky_thread_403(self):
        """Marking a thread sticky without permissions returns 403."""
        response = post(
            self.client, "wiki.discuss.sticky_thread", args=[self.doc.slug, self.thread.id]
        )
        self.assertEqual(403, response.status_code)

    def test_locked_thread_403(self):
        """Marking a thread locked without permissions returns 403."""
        response = post(
            self.client, "wiki.discuss.lock_thread", args=[self.doc.slug, self.thread.id]
        )
        self.assertEqual(403, response.status_code)

    def test_locked_thread_405(self):
        """Marking a thread locked via a GET instead of a POST request."""
        response = get(
            self.client, "wiki.discuss.lock_thread", args=[self.doc.slug, self.thread.id]
        )
        self.assertEqual(405, response.status_code)

    def test_post_edit_403(self):
        """Editing a post without permissions returns 403."""
        response = get(
            self.client,
            "wiki.discuss.edit_post",
            args=[self.doc.slug, self.thread.id, self.post.id],
        )
        self.assertEqual(403, response.status_code)

    def test_post_delete_403(self):
        """Deleting a post without permissions returns 403."""
        response = get(
            self.client,
            "wiki.discuss.delete_post",
            args=[self.doc.slug, self.thread.id, self.post.id],
        )
        self.assertEqual(403, response.status_code)
298,552 | initialize constraints on dnorm problem | # -*- coding: utf-8 -*-
"""
This module implements internal-use functions for semidefinite programming.
"""
import collections
import functools
import numpy as np
import scipy.sparse as sp
# Conditionally import CVXPY
try:
import cvxpy
__all__ = ["dnorm_problem", "dnorm_sparse_problem"]
except ImportError:
cvxpy = None
__all__ = []
from .operators import swap
Complex = collections.namedtuple("Complex", ["re", "im"])
def _complex_var(rows=1, cols=1, name=None):
    """Return a ``Complex`` pair of real CVXPY matrix variables of shape (rows, cols)."""
    shape = (rows, cols)
    if name:
        re_name, im_name = name + "_re", name + "_im"
    else:
        re_name = im_name = None
    real_part = cvxpy.Variable(shape, name=re_name)
    imag_part = cvxpy.Variable(shape, name=im_name)
    return Complex(re=real_part, im=imag_part)
def _make_constraints(*rhos):
    """
    Build CVXPY constraints forcing each ``Complex`` variable in ``rhos`` to be
    a valid density operator: unit trace, Hermitian, and positive semidefinite
    (PSD expressed on the real embedding [[re, -im], [im, re]]).
    """
    # Unit trace: tr(rho) == 1 (the trace of a Hermitian matrix is real).
    trace_constraints = [cvxpy.trace(rho.re) == 1 for rho in rhos]

    # Hermiticity: symmetric real part, antisymmetric imaginary part.
    hermitian_constraints = []
    for rho in rhos:
        hermitian_constraints.append(rho.re == rho.re.T)
        hermitian_constraints.append(rho.im == -rho.im.T)

    # Positive semidefiniteness of the complex matrix via its real embedding.
    psd_constraints = [
        cvxpy.bmat([[rho.re, -rho.im], [rho.im, rho.re]]) >> 0 for rho in rhos
    ]

    return trace_constraints + hermitian_constraints + psd_constraints
def _arr_to_complex(A):
    """Wrap a NumPy array in a ``Complex`` pair; ``im`` is all zeros for real input."""
    if not np.iscomplex(A).any():
        return Complex(re=A, im=np.zeros_like(A))
    return Complex(re=A.real, im=A.imag)
def _kron(A, B):
    """Kronecker product of two ``Complex`` operands (NumPy arrays are promoted first)."""
    if isinstance(A, np.ndarray):
        A = _arr_to_complex(A)
    if isinstance(B, np.ndarray):
        B = _arr_to_complex(B)
    # (A_re + i A_im) (x) (B_re + i B_im), split into real and imaginary parts.
    real = cvxpy.kron(A.re, B.re) - cvxpy.kron(A.im, B.im)
    imag = cvxpy.kron(A.im, B.re) + cvxpy.kron(A.re, B.im)
    return Complex(re=real, im=imag)
def _conj(W, A):
    """Return the congruence W @ A @ W.T for Complex W = U + iV and A = X + iY.

    Expanding (U + iV)(X + iY)(U.T + iV.T) and collecting real/imaginary
    parts gives the eight products below.
    """
    U, V = W.re, W.im
    X, Y = A.re, A.im
    real = U @ X @ U.T - U @ Y @ V.T - V @ X @ V.T - V @ Y @ U.T
    imag = U @ X @ V.T + U @ Y @ U.T + V @ X @ U.T - V @ Y @ V.T
    return Complex(re=real, im=imag)
@functools.lru_cache
def METHOD_NAME(dim):
    """Build the variables and constraints shared by the dnorm SDPs.

    Creates the complex dim**2 x dim**2 optimization variable ``X`` plus two
    dim x dim density-operator variables ``rho0``/``rho1`` with their
    density-matrix constraints, and the big positive-semidefinite block
    constraint of the diamond-norm program.  Cached with ``lru_cache`` so
    repeated problems of the same dimension reuse one set of CVXPY objects.

    Returns a tuple ``(X, constraints)``.
    """
    # Start assembling constraints and variables.
    constraints = []
    # Make a complex variable for X.
    X = _complex_var(dim**2, dim**2, "X")
    # Make complex variables for rho0 and rho1.
    rho0 = _complex_var(dim, dim, "rho0")
    rho1 = _complex_var(dim, dim, "rho1")
    constraints += _make_constraints(rho0, rho1)
    # Finally, add the tricky positive semidefinite constraint.
    # Since we're using column-stacking, but Watrous used row-stacking,
    # we need to swap the order in Rho0 and Rho1. This is not straightforward,
    # as CVXPY requires that the constant be the first argument. To solve this,
    # We conjugate by SWAP.
    W = swap(dim, dim).full()
    W = Complex(re=W.real, im=W.imag)
    Rho0 = _conj(W, _kron(np.eye(dim), rho0))
    Rho1 = _conj(W, _kron(np.eye(dim), rho1))
    # Real embedding of the complex block matrix [[Rho0, X], [X^dag, Rho1]] >> 0:
    # top-left/bottom-right carry the real parts, off-diagonals the imaginary parts.
    Y = cvxpy.bmat(
        [
            [Rho0.re, X.re, -Rho0.im, -X.im],
            [X.re.T, Rho1.re, X.im.T, -Rho1.im],
            [Rho0.im, X.im, Rho0.re, X.re],
            [-X.im.T, Rho1.im, X.re.T, Rho1.re],
        ]
    )
    constraints += [Y >> 0]
    return X, constraints
def dnorm_problem(dim):
    """
    Create the cvxpy ``Problem`` for the dnorm metric using dense arrays.

    Returns ``(problem, Jr, Ji)`` where ``Jr``/``Ji`` are dense CVXPY
    Parameters for the real and imaginary parts of the Choi matrix; assign
    their ``.value`` before solving.
    """
    X, constraints = METHOD_NAME(dim)
    Jr = cvxpy.Parameter((dim**2, dim**2))
    Ji = cvxpy.Parameter((dim**2, dim**2))
    # The objective, however, depends on J.
    objective = cvxpy.Maximize(cvxpy.trace(Jr.T @ X.re + Ji.T @ X.im))
    problem = cvxpy.Problem(objective, constraints)
    return problem, Jr, Ji
def dnorm_sparse_problem(dim, J_dat):
    """
    Create the cvxpy ``Problem`` for the dnorm metric using sparse arrays.

    ``J_dat`` is a scipy sparse matrix (the Choi matrix); it is converted to
    COO format and embedded as parameters via the reshape workaround below.
    """
    X, constraints = METHOD_NAME(dim)
    J_val = J_dat.tocoo()

    def adapt_sparse_params(A_val, dim):
        # This detour is needed as pointed out in cvxgrp/cvxpy#1159, as cvxpy
        # can not solve with parameters that are sparse matrices directly.
        # Solutions have to be made through calling cvxpy.reshape on
        # the original sparse matrix.
        side_size = dim**2
        # Parameter holding only the nonzero entries of A_val.
        A_nnz = cvxpy.Parameter(A_val.nnz)
        A_data = np.ones(A_nnz.size)
        # Flattened (row-major) positions of the nonzero entries.
        A_rows = A_val.row * side_size + A_val.col
        A_cols = np.arange(A_nnz.size)
        # We are pushing the data on the location of the nonzero elements
        # to the nonzero rows of A_indexer
        A_Indexer = sp.coo_matrix(
            (A_data, (A_rows, A_cols)), shape=(side_size**2, A_nnz.size)
        )
        # We get finally the sparse matrix A which we wanted
        A = cvxpy.reshape(A_Indexer @ A_nnz, (side_size, side_size), order="C")
        A_nnz.value = A_val.data
        return A

    Jr_val = J_val.real
    Jr = adapt_sparse_params(Jr_val, dim)
    Ji_val = J_val.imag
    Ji = adapt_sparse_params(Ji_val, dim)
    # The objective, however, depends on J.
    objective = cvxpy.Maximize(cvxpy.trace(Jr.T @ X.re + Ji.T @ X.im))
    problem = cvxpy.Problem(objective, constraints)
    return problem
298,553 | test number unit of measurement | # Mixins for testing number entities
from ..helpers import assert_device_properties_set
class BasicNumberTests:
    """Mixin providing standard checks for a single number entity.

    Call ``setUpBasicNumber`` from the concrete test case's ``setUp``.  The
    mixin then validates min/max/step/mode/unit/device_class and round-trips
    a value through the dps dictionary (``self.dps``) supplied by the test
    harness.  ``testdata``, when given, is a ``(dps_value, entity_value)``
    pair used instead of the derived default test value.
    """

    def setUpBasicNumber(
        self,
        dps,
        subject,
        max,
        min=0,
        step=1,
        mode="auto",
        scale=1,
        unit=None,
        testdata=None,
        device_class=None,
    ):
        self.basicNumber = subject
        self.basicNumberDps = dps
        self.basicNumberMin = min
        self.basicNumberMax = max
        self.basicNumberStep = step
        self.basicNumberMode = mode
        # scale converts between the entity value and the raw dps value.
        self.basicNumberScale = scale
        self.basicNumberUnit = unit
        self.basicNumberTestData = testdata
        self.basicNumberDevClass = device_class

    def test_number_min_value(self):
        self.assertEqual(self.basicNumber.native_min_value, self.basicNumberMin)

    def test_number_max_value(self):
        self.assertEqual(self.basicNumber.native_max_value, self.basicNumberMax)

    def test_number_step(self):
        self.assertEqual(self.basicNumber.native_step, self.basicNumberStep)

    def test_number_mode(self):
        self.assertEqual(self.basicNumber.mode, self.basicNumberMode)

    def METHOD_NAME(self):
        self.assertEqual(
            self.basicNumber.native_unit_of_measurement, self.basicNumberUnit
        )

    def test_number_device_class(self):
        self.assertEqual(
            self.basicNumber.device_class,
            self.basicNumberDevClass,
        )

    def test_number_value(self):
        if self.basicNumberTestData:
            val = self.basicNumberTestData[0]
            expected = self.basicNumberTestData[1]
        else:
            # Default test point: smallest in-range value at least one step up.
            expected = min(
                max(self.basicNumberMin, self.basicNumberStep), self.basicNumberMax
            )
            val = expected * self.basicNumberScale
        self.dps[self.basicNumberDps] = val
        self.assertEqual(self.basicNumber.native_value, expected)

    async def test_number_set_value(self):
        if self.basicNumberTestData:
            dps_val = self.basicNumberTestData[0]
            val = self.basicNumberTestData[1]
        else:
            val = min(
                max(self.basicNumberMin, self.basicNumberStep),
                self.basicNumberMax,
            )
            dps_val = val * self.basicNumberScale
        # Setting the entity value must write the scaled value to the device.
        async with assert_device_properties_set(
            self.basicNumber._device, {self.basicNumberDps: dps_val}
        ):
            await self.basicNumber.async_set_native_value(val)

    def test_number_extra_state_attributes(self):
        self.assertEqual(self.basicNumber.extra_state_attributes, {})
class MultiNumberTests:
    """Mixin validating several number entities configured via a list of dicts.

    Each entry in ``numbers`` describes one entity: ``name`` (required; key
    into ``self.entities``), ``dps``, ``min``, ``max``, ``step``, ``mode``,
    ``scale``, ``unit``, ``testdata`` and ``device_class`` — mirroring the
    single-entity ``BasicNumberTests`` mixin, keyed by entity name.
    """

    def setUpMultiNumber(self, numbers):
        self.multiNumber = {}
        self.multiNumberDps = {}
        self.multiNumberMin = {}
        self.multiNumberMax = {}
        self.multiNumberStep = {}
        self.multiNumberMode = {}
        self.multiNumberScale = {}
        self.multiNumberUnit = {}
        self.multiNumberTestData = {}
        self.multiNumberDevClass = {}
        for n in numbers:
            name = n.get("name")
            subject = self.entities.get(name)
            if subject is None:
                raise AttributeError(f"No number for {name} found.")
            self.multiNumber[name] = subject
            self.multiNumberDps[name] = n.get("dps")
            self.multiNumberMin[name] = n.get("min", 0)
            self.multiNumberMax[name] = n.get("max")
            self.multiNumberStep[name] = n.get("step", 1)
            self.multiNumberMode[name] = n.get("mode", "auto")
            self.multiNumberScale[name] = n.get("scale", 1)
            self.multiNumberUnit[name] = n.get("unit", None)
            self.multiNumberTestData[name] = n.get("testdata", None)
            self.multiNumberDevClass[name] = n.get("device_class", None)

    def test_multi_number_min_value(self):
        for key, subject in self.multiNumber.items():
            self.assertEqual(
                subject.native_min_value,
                self.multiNumberMin[key],
                f"{key} min value mismatch",
            )

    def test_multi_number_max_value(self):
        for key, subject in self.multiNumber.items():
            self.assertEqual(
                subject.native_max_value,
                self.multiNumberMax[key],
                f"{key} max value mismatch",
            )

    def test_multi_number_step(self):
        for key, subject in self.multiNumber.items():
            self.assertEqual(
                subject.native_step,
                self.multiNumberStep[key],
                f"{key} step mismatch",
            )

    def test_multi_number_mode(self):
        for key, subject in self.multiNumber.items():
            self.assertEqual(
                subject.mode,
                self.multiNumberMode[key],
                f"{key} mode mismatch",
            )

    def test_multi_number_unit_of_measurement(self):
        for key, subject in self.multiNumber.items():
            self.assertEqual(
                subject.native_unit_of_measurement,
                self.multiNumberUnit[key],
                f"{key} unit mismatch",
            )

    def test_multi_number_device_class(self):
        for key, subject in self.multiNumber.items():
            self.assertEqual(
                subject.device_class,
                self.multiNumberDevClass[key],
                f"{key} device class mismatch",
            )

    def test_multi_number_value(self):
        for key, subject in self.multiNumber.items():
            if self.multiNumberTestData[key]:
                # testdata is a (dps_value, entity_value) pair.
                val = self.multiNumberTestData[key][1]
                dps_val = self.multiNumberTestData[key][0]
            else:
                val = min(
                    max(self.multiNumberMin[key], self.multiNumberStep[key]),
                    self.multiNumberMax[key],
                )
                dps_val = val * self.multiNumberScale[key]
            self.dps[self.multiNumberDps[key]] = dps_val
            self.assertEqual(subject.native_value, val, f"{key} value mismatch")

    async def test_multi_number_set_value(self):
        for key, subject in self.multiNumber.items():
            if self.multiNumberTestData[key]:
                val = self.multiNumberTestData[key][1]
                dps_val = self.multiNumberTestData[key][0]
            else:
                val = min(
                    max(self.multiNumberMin[key], self.multiNumberStep[key]),
                    self.multiNumberMax[key],
                )
                dps_val = val * self.multiNumberScale[key]
            # Setting the entity value must write the scaled value to the device.
            async with assert_device_properties_set(
                subject._device,
                {self.multiNumberDps[key]: dps_val},
                f"{key} failed to set correct value",
            ):
                await subject.async_set_native_value(val)

    def test_multi_number_extra_state_attributes(self):
        for key, subject in self.multiNumber.items():
            self.assertEqual(
                subject.extra_state_attributes,
                {},
                f"{key} extra_state_attributes mismatch",
            )
298,554 | apply | """
Copyright 2013 Steven Diamond, 2017 Akshay Agrawal, 2017 Robin Verschueren
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Tuple
from cvxpy import problems
from cvxpy.expressions import cvxtypes
from cvxpy.expressions.expression import Expression
from cvxpy.problems.objective import Minimize
from cvxpy.reductions.canonicalization import Canonicalization
from cvxpy.reductions.dcp2cone.canonicalizers import CANON_METHODS as cone_canon_methods
from cvxpy.reductions.inverse_data import InverseData
from cvxpy.reductions.qp2quad_form.canonicalizers import QUAD_CANON_METHODS as quad_canon_methods
class Dcp2Cone(Canonicalization):
    """Reduce DCP problems to a conic form.

    This reduction takes as input (minimization) DCP problems and converts
    them into problems with affine or quadratic objectives and conic
    constraints whose arguments are affine.
    """

    def __init__(self, problem=None, quad_obj: bool = False) -> None:
        # NOTE(review): deliberately calls Canonicalization's *parent*
        # __init__ (skipping Canonicalization.__init__); the canon method
        # tables are installed directly below instead — confirm before
        # changing to a plain super() call.
        super(Canonicalization, self).__init__(problem=problem)
        self.cone_canon_methods = cone_canon_methods
        self.quad_canon_methods = quad_canon_methods
        # When True, quadratic atoms along an all-affine path from the root
        # are kept quadratic instead of being lowered to cones.
        self.quad_obj = quad_obj

    def accepts(self, problem):
        """A problem is accepted if it is a minimization and is DCP.
        """
        return type(problem.objective) == Minimize and problem.is_dcp()

    def METHOD_NAME(self, problem):
        """Converts a DCP problem to a conic form.

        Raises ValueError if ``accepts`` rejects the problem.  Returns the
        equivalent conic problem and the InverseData needed to map a
        solution back onto the original problem.
        """
        if not self.accepts(problem):
            raise ValueError("Cannot reduce problem to cone program")
        inverse_data = InverseData(problem)
        # The objective is canonicalized with affine_above=True so a
        # quadratic objective can be preserved when quad_obj is set.
        canon_objective, canon_constraints = self.canonicalize_tree(
            problem.objective, True)
        for constraint in problem.constraints:
            # canon_constr is the constraint rexpressed in terms of
            # its canonicalized arguments, and aux_constr are the constraints
            # generated while canonicalizing the arguments of the original
            # constraint
            canon_constr, aux_constr = self.canonicalize_tree(
                constraint, False)
            canon_constraints += aux_constr + [canon_constr]
            inverse_data.cons_id_map.update({constraint.id: canon_constr.id})
        new_problem = problems.problem.Problem(canon_objective,
                                               canon_constraints)
        return new_problem, inverse_data

    def canonicalize_tree(self, expr, affine_above: bool) -> Tuple[Expression, list]:
        """Recursively canonicalize an Expression.

        Parameters
        ----------
        expr : The expression tree to canonicalize.
        affine_above : The path up to the root node is all affine atoms.

        Returns
        -------
        A tuple of the canonicalized expression and generated constraints.
        """
        # TODO don't copy affine expressions?
        if type(expr) == cvxtypes.partial_problem():
            # Partial problems carry their own objective and constraints;
            # canonicalize both and merge the generated constraints.
            canon_expr, constrs = self.canonicalize_tree(
                expr.args[0].objective.expr, False)
            for constr in expr.args[0].constraints:
                canon_constr, aux_constr = self.canonicalize_tree(constr, False)
                constrs += [canon_constr] + aux_constr
        else:
            # An atom with no cone canon method is treated as affine, which
            # keeps the affine_above flag alive for its children.
            affine_atom = type(expr) not in self.cone_canon_methods
            canon_args = []
            constrs = []
            for arg in expr.args:
                canon_arg, c = self.canonicalize_tree(arg, affine_atom and affine_above)
                canon_args += [canon_arg]
                constrs += c
            canon_expr, c = self.canonicalize_expr(expr, canon_args, affine_above)
            constrs += c
        return canon_expr, constrs

    def canonicalize_expr(self, expr, args, affine_above: bool) -> Tuple[Expression, list]:
        """Canonicalize an expression, w.r.t. canonicalized arguments.

        Parameters
        ----------
        expr : The expression tree to canonicalize.
        args : The canonicalized arguments of expr.
        affine_above : The path up to the root node is all affine atoms.

        Returns
        -------
        A tuple of the canonicalized expression and generated constraints.
        """
        # Constant trees are collapsed, but parameter trees are preserved.
        if isinstance(expr, Expression) and (
                expr.is_constant() and not expr.parameters()):
            return expr, []
        if self.quad_obj and affine_above and type(expr) in self.quad_canon_methods:
            # Special case for power.
            if type(expr) == cvxtypes.power() and not expr._quadratic_power():
                return self.cone_canon_methods[type(expr)](expr, args)
            else:
                return self.quad_canon_methods[type(expr)](expr, args)
        if type(expr) in self.cone_canon_methods:
            return self.cone_canon_methods[type(expr)](expr, args)
        # No canon method: affine atom, just rebuild it on the canon args.
        return expr.copy(args), []
298,555 | test not a fuzzer invalid name | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fuzzers.utils."""
import os
import shutil
import tempfile
import unittest
from clusterfuzz._internal.bot.fuzzers import utils
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
class IsFuzzTargetLocalTest(unittest.TestCase):
    """is_fuzz_target_local tests.

    A file is recognized as a fuzz target either by its name (e.g. a
    ``_fuzzer`` suffix, or a match of the FUZZER_NAME_REGEX env var) or by
    containing the ``LLVMFuzzerTestOneInput`` marker string.
    """

    def setUp(self):
        test_helpers.patch_environ(self)
        self.temp_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.temp_dir, ignore_errors=True)

    def _create_file(self, name, contents=b''):
        """Create a file with the given name/contents in temp_dir; return its path."""
        path = os.path.join(self.temp_dir, name)
        with open(path, 'wb') as f:
            f.write(contents)
        return path

    def METHOD_NAME(self):
        """A name with invalid characters is rejected even with the marker string."""
        path = self._create_file('abc$_fuzzer', contents=b'LLVMFuzzerTestOneInput')
        self.assertFalse(utils.is_fuzz_target_local(path))

    def test_not_a_fuzzer_blocklisted_name(self):
        """Blocklisted names are rejected even with the marker string."""
        path = self._create_file(
            'jazzer_driver', contents=b'LLVMFuzzerTestOneInput')
        self.assertFalse(utils.is_fuzz_target_local(path))

    def test_not_a_fuzzer_without_extension(self):
        """No suffix and no marker string: not a fuzz target."""
        path = self._create_file('abc', contents=b'anything')
        self.assertFalse(utils.is_fuzz_target_local(path))

    def test_not_a_fuzzer_with_extension(self):
        """Non-executable extensions are rejected despite the marker string."""
        path = self._create_file('abc.dict', contents=b'LLVMFuzzerTestOneInput')
        self.assertFalse(utils.is_fuzz_target_local(path))

    def test_not_a_fuzzer_with_extension_and_suffix(self):
        """A _fuzzer suffix does not help when the extension is excluded."""
        path = self._create_file(
            'abc_fuzzer.dict', contents=b'LLVMFuzzerTestOneInput')
        self.assertFalse(utils.is_fuzz_target_local(path))

    def test_fuzzer_posix(self):
        """A _fuzzer suffix alone is enough on POSIX-style names."""
        path = self._create_file('abc_fuzzer', contents=b'anything')
        self.assertTrue(utils.is_fuzz_target_local(path))

    def test_fuzzer_win(self):
        """A _fuzzer suffix with .exe is accepted."""
        path = self._create_file('abc_fuzzer.exe', contents=b'anything')
        self.assertTrue(utils.is_fuzz_target_local(path))

    def test_fuzzer_py(self):
        """A _fuzzer suffix with .par is accepted."""
        path = self._create_file('abc_fuzzer.par', contents=b'anything')
        self.assertTrue(utils.is_fuzz_target_local(path))

    def test_fuzzer_not_exist(self):
        """A nonexistent path is not a fuzz target."""
        self.assertFalse(utils.is_fuzz_target_local('/not_exist_fuzzer'))

    def test_fuzzer_without_suffix(self):
        """The marker string alone qualifies a plainly named file."""
        path = self._create_file(
            'abc', contents=b'anything\nLLVMFuzzerTestOneInput')
        self.assertTrue(utils.is_fuzz_target_local(path))

    def test_fuzzer_with_name_regex_match(self):
        """FUZZER_NAME_REGEX match qualifies regardless of contents."""
        environment.set_value('FUZZER_NAME_REGEX', '.*_custom$')
        path = self._create_file('a_custom', contents=b'anything')
        self.assertTrue(utils.is_fuzz_target_local(path))

    def test_fuzzer_with_file_string_and_without_name_regex_match(self):
        """When a name regex is set, the marker string alone is not enough."""
        environment.set_value('FUZZER_NAME_REGEX', '.*_custom$')
        path = self._create_file(
            'nomatch', contents=b'anything\nLLVMFuzzerTestOneInput')
        self.assertFalse(utils.is_fuzz_target_local(path))

    def test_fuzzer_without_file_string_and_without_name_regex_match(self):
        """No regex match and no marker string: rejected."""
        environment.set_value('FUZZER_NAME_REGEX', '.*_custom$')
        path = self._create_file('nomatch', contents=b'anything')
        self.assertFalse(utils.is_fuzz_target_local(path))

    def test_fuzzer_with_fuzzer_name_and_without_name_regex_match(self):
        """The _fuzzer naming convention still applies alongside a regex."""
        environment.set_value('FUZZER_NAME_REGEX', '.*_custom$')
        path = self._create_file(
            'a_fuzzer', contents=b'anything\nLLVMFuzzerTestOneInput')
        self.assertTrue(utils.is_fuzz_target_local(path))

    def test_file_handle(self):
        """Test with a file handle."""
        path = self._create_file(
            'abc', contents=b'anything\nLLVMFuzzerTestOneInput')
        with open(path, 'rb') as f:
            self.assertTrue(utils.is_fuzz_target_local('name', f))
class GetSupportingFileTest(unittest.TestCase):
    """Tests for get_supporting_file.

    Known executable extensions (.exe, .par) are stripped before appending
    the supporting-file suffix; unknown extensions are kept.
    """

    def test_no_extension(self):
        """Test no extension."""
        self.assertEqual('/a/b.labels', utils.get_supporting_file(
            '/a/b', '.labels'))

    def test_unknown_extension(self):
        """Test unknown extension."""
        self.assertEqual('/a/b.c.labels',
                         utils.get_supporting_file('/a/b.c', '.labels'))

    def test_exe(self):
        """Test exe extension."""
        self.assertEqual('/a/b.labels',
                         utils.get_supporting_file('/a/b.exe', '.labels'))

    def test_par(self):
        """Test par extension."""
        self.assertEqual('/a/b.labels',
                         utils.get_supporting_file('/a/b.par', '.labels'))
298,556 | test serialize unknown identifier | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import unittest
from azure.communication.chat._communication_identifier_serializer import serialize_identifier, deserialize_identifier
from azure.communication.chat._generated.models import(
CommunicationIdentifierModel,
MicrosoftTeamsUserIdentifierModel,
CommunicationUserIdentifierModel,
PhoneNumberIdentifierModel
)
from azure.communication.chat._shared.models import(
CommunicationUserIdentifier,
CommunicationCloudEnvironment,
UnknownIdentifier,
PhoneNumberIdentifier,
MicrosoftTeamsUserIdentifier
)
class CommunicationUserIdentifierSerializerTest(unittest.TestCase):
    """Round-trip tests for communication identifier (de)serialization.

    Fix: the original assertions compared strings and enum values with
    ``is`` (identity). That only passes because CPython happens to intern
    short literals, is implementation-dependent, and emits a SyntaxWarning
    on Python 3.8+. All value comparisons now use ``==``.
    """

    def setUp(self):
        # Shared fixture models reused by the deserialization tests.
        self.testPhoneNumber = "+12223334444"
        self.testUserModel = CommunicationUserIdentifierModel(id="User Id")
        self.testPhoneNumberModel = PhoneNumberIdentifierModel(value=self.testPhoneNumber)
        self.testTeamsUserModel = MicrosoftTeamsUserIdentifierModel(
            user_id="Microsoft Teams User Id",
            is_anonymous=True,
            cloud=CommunicationCloudEnvironment.PUBLIC,
        )

    def test_serialize_communication_user(self):
        """Serializing a CommunicationUserIdentifier emits its id."""
        communication_identifier_model = serialize_identifier(
            CommunicationUserIdentifier("an id")
        )
        assert communication_identifier_model['communication_user']['id'] == "an id"

    def test_deserialize_communication_user(self):
        """Deserializing a communication-user model yields the matching identifier."""
        communication_identifier_actual = deserialize_identifier(
            CommunicationIdentifierModel(
                raw_id="an id",
                communication_user=self.testUserModel
            )
        )
        communication_identifier_expected = CommunicationUserIdentifier("an id")
        assert isinstance(communication_identifier_actual, CommunicationUserIdentifier)
        assert communication_identifier_actual.properties['id'] == communication_identifier_expected.properties['id']

    def METHOD_NAME(self):
        """Serializing an UnknownIdentifier keeps only its raw id."""
        unknown_identifier_model = serialize_identifier(
            UnknownIdentifier("an id")
        )
        assert unknown_identifier_model['raw_id'] == "an id"

    def test_deserialize_unknown_identifier(self):
        """A model with only a raw_id deserializes to UnknownIdentifier."""
        unknown_identifier_actual = deserialize_identifier(
            CommunicationIdentifierModel(
                raw_id="an id"
            )
        )
        unknown_identifier_expected = UnknownIdentifier("an id")
        assert isinstance(unknown_identifier_actual, UnknownIdentifier)
        assert unknown_identifier_actual.raw_id == unknown_identifier_expected.raw_id

    def test_serialize_phone_number(self):
        """Serializing a PhoneNumberIdentifier emits its value."""
        phone_number_identifier_model = serialize_identifier(
            PhoneNumberIdentifier("phonenumber")
        )
        assert phone_number_identifier_model['phone_number']['value'] == "phonenumber"

    def test_deserialize_phone_number(self):
        """Deserializing a phone-number model preserves value and raw id."""
        phone_number_identifier_actual = deserialize_identifier(
            CommunicationIdentifierModel(
                raw_id="someid",
                phone_number=self.testPhoneNumberModel
            )
        )
        phone_number_identifier_expected = PhoneNumberIdentifier(self.testPhoneNumber, raw_id="someid")
        assert isinstance(phone_number_identifier_actual, PhoneNumberIdentifier)
        assert phone_number_identifier_actual.properties['value'] == phone_number_identifier_expected.properties['value']
        assert phone_number_identifier_actual.raw_id == phone_number_identifier_expected.raw_id

    def test_serialize_teams_user(self):
        """Serializing a MicrosoftTeamsUserIdentifier emits user id, cloud and raw id."""
        teams_user_identifier_model = serialize_identifier(
            MicrosoftTeamsUserIdentifier(
                user_id="teamsid",
                cloud=CommunicationCloudEnvironment.PUBLIC,
                raw_id="someid"
            )
        )
        assert teams_user_identifier_model['microsoft_teams_user']['user_id'] == "teamsid"
        assert teams_user_identifier_model['microsoft_teams_user']['cloud'] == CommunicationCloudEnvironment.PUBLIC
        assert teams_user_identifier_model['raw_id'] == "someid"

    def test_deserialize_teams_user(self):
        """Deserializing a Teams-user model preserves all properties."""
        teams_user_identifier_actual = deserialize_identifier(
            CommunicationIdentifierModel(
                raw_id="someid",
                microsoft_teams_user=self.testTeamsUserModel
            )
        )
        teams_user_identifier_expected = MicrosoftTeamsUserIdentifier(
            raw_id="someid",
            user_id="Microsoft Teams User Id",
            cloud=CommunicationCloudEnvironment.PUBLIC,
            is_anonymous=True
        )
        assert isinstance(teams_user_identifier_actual, MicrosoftTeamsUserIdentifier)
        assert teams_user_identifier_actual.raw_id == teams_user_identifier_expected.raw_id
        assert teams_user_identifier_actual.properties['user_id'] == teams_user_identifier_expected.properties['user_id']
        assert teams_user_identifier_actual.properties['is_anonymous'] == teams_user_identifier_expected.properties['is_anonymous']
        assert teams_user_identifier_actual.properties['cloud'] == teams_user_identifier_expected.properties['cloud']

    def test_serialize_foreign_throws(self):
        """Serializing an unrelated object raises TypeError."""
        foreign_obj = "Foreign object"
        self.assertRaises(
            TypeError,
            lambda: serialize_identifier(foreign_obj)
        )
if __name__ == "__main__":
unittest.main( |
298,557 | old dataset records index name | # coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common environment vars / settings
"""
import logging
import os
import re
import warnings
from pathlib import Path
from typing import List, Optional
from urllib.parse import urlparse
from pydantic import BaseSettings, Field, root_validator, validator
from argilla._constants import DEFAULT_MAX_KEYWORD_LENGTH, DEFAULT_TELEMETRY_KEY
class Settings(BaseSettings):
    """
    Main application settings. The pydantic BaseSettings class makes
    accessible environment variables by setting attributes.

    See <https://pydantic-docs.helpmanual.io/usage/settings/>

    only_bulk_api: (ONLY_BULK_API env var)
        If True, activate only bulk and search endpoints
    elasticseach: (ELASTICSEARCH env var)
        The elasticsearch endpoint for datasets persistence
    cors_origins: (CORS_ORIGINS env var)
        List of host patterns for CORS origin access
    docs_enabled: True
        If True, enable openapi docs endpoint at /api/docs
    es_records_index_shards:
        Configures the number of shards for dataset records index creation. Default=1
    es_records_index_replicas:
        Configures the number of shard replicas for dataset records index creation. Default=0
    disable_es_index_template_creation: (DISABLE_ES_INDEX_TEMPLATE_CREATION env var)
        Allowing advanced users to create their own es index settings and mappings. Default=False
    """

    __LOGGER__ = logging.getLogger(__name__)

    # Elasticsearch index-name templates; "{}" in the records template is
    # filled with a dataset id by the properties below / their callers.
    __DATASETS_INDEX_NAME__ = "ar.datasets"
    __DATASETS_RECORDS_INDEX_NAME__ = "ar.dataset.{}"

    # Values are populated from ARGILLA_-prefixed env vars (see Config below).
    home_path: Optional[str] = Field(description="The home path where argilla related files will be stored")
    base_url: Optional[str] = Field(description="The default base url where server will be deployed")
    database_url: Optional[str] = Field(description="The database url that argilla will use as data store")

    elasticsearch: str = "http://localhost:9200"
    elasticsearch_ssl_verify: bool = True
    elasticsearch_ca_path: Optional[str] = None
    cors_origins: List[str] = ["*"]
    docs_enabled: bool = True

    # Optional lowercase-only namespace used to prefix every index name.
    namespace: str = Field(default=None, regex=r"^[a-z]+$")

    enable_migration: bool = Field(
        default=False,
        description="If enabled, try to migrate data from old rubrix installation",
    )

    # Analyzer configuration
    default_es_search_analyzer: str = "standard"
    exact_es_search_analyzer: str = "whitespace"
    # This line will be enabled once words field won't be used anymore
    # wordcloud_es_search_analyzer: str = "multilingual_stop_analyzer"

    es_records_index_shards: int = 1
    es_records_index_replicas: int = 0

    vectors_fields_limit: int = Field(
        default=5,
        description="Max number of supported vectors per record",
    )

    metadata_fields_limit: int = Field(
        default=50,
        gt=0,
        le=100,
        description="Max number of fields in metadata",
    )
    metadata_field_length: int = Field(
        default=DEFAULT_MAX_KEYWORD_LENGTH,
        description="Max length supported for the string metadata fields."
        " Values containing higher than this will be truncated",
    )

    enable_telemetry: bool = True
    telemetry_key: str = DEFAULT_TELEMETRY_KEY

    @validator("home_path", always=True)
    def set_home_path_default(cls, home_path: str):
        # Default to ~/.argilla when no home path is configured.
        return home_path or os.path.join(Path.home(), ".argilla")

    @validator("base_url", always=True)
    def normalize_base_url(cls, base_url: str):
        # Force exactly one leading and trailing slash so the value can be
        # joined with route paths safely.
        if not base_url:
            base_url = "/"
        if not base_url.startswith("/"):
            base_url = "/" + base_url
        if not base_url.endswith("/"):
            base_url += "/"
        return base_url

    @validator("database_url", pre=True, always=True)
    def set_database_url(cls, database_url: str, values: dict) -> str:
        # No URL configured: fall back to a SQLite file inside home_path.
        if not database_url:
            home_path = values.get("home_path")
            sqlite_file = os.path.join(home_path, "argilla.db")
            return f"sqlite+aiosqlite:///{sqlite_file}?check_same_thread=False"

        # Rewrite legacy driver-less URLs to the async drivers, warning the user.
        if "sqlite" in database_url:
            regex = re.compile(r"sqlite(?!\+aiosqlite)")
            if regex.match(database_url):
                warnings.warn(
                    "From version 1.14.0, Argilla will use `aiosqlite` as default SQLite driver. The protocol in the"
                    " provided database URL has been automatically replaced from `sqlite` to `sqlite+aiosqlite`."
                    " Please, update your database URL to use `sqlite+aiosqlite` protocol."
                )
                return re.sub(regex, "sqlite+aiosqlite", database_url)

        if "postgresql" in database_url:
            regex = re.compile(r"postgresql(?!\+asyncpg)(\+psycopg2)?")
            if regex.match(database_url):
                warnings.warn(
                    "From version 1.14.0, Argilla will use `asyncpg` as default PostgreSQL driver. The protocol in the"
                    " provided database URL has been automatically replaced from `postgresql` to `postgresql+asyncpg`."
                    " Please, update your database URL to use `postgresql+asyncpg` protocol."
                )
                return re.sub(regex, "postgresql+asyncpg", database_url)

        return database_url

    @root_validator(skip_on_failure=True)
    def create_home_path(cls, values):
        # Side effect: make sure the home directory exists as soon as the
        # settings object is built.
        Path(values["home_path"]).mkdir(parents=True, exist_ok=True)
        return values

    @property
    def dataset_index_name(self) -> str:
        """Name of the datasets index, namespace-prefixed when configured."""
        ns = self.namespace
        if ns:
            return f"{self.namespace}.{self.__DATASETS_INDEX_NAME__}"
        return self.__DATASETS_INDEX_NAME__

    @property
    def dataset_records_index_name(self) -> str:
        """Records index-name *template* ("{}" placeholder for the dataset id)."""
        ns = self.namespace
        if ns:
            return f"{self.namespace}.{self.__DATASETS_RECORDS_INDEX_NAME__}"
        return self.__DATASETS_RECORDS_INDEX_NAME__

    @property
    def old_dataset_index_name(self) -> str:
        """Datasets index name used by pre-argilla (rubrix) installations."""
        index_name = ".rubrix<NAMESPACE>.datasets-v0"
        ns = self.namespace
        if ns is None:
            return index_name.replace("<NAMESPACE>", "")
        return index_name.replace("<NAMESPACE>", f".{ns}")

    @property
    def METHOD_NAME(self) -> str:
        """Records index-name template used by pre-argilla (rubrix) installations."""
        index_name = ".rubrix<NAMESPACE>.dataset.{}.records-v0"
        ns = self.namespace
        if ns is None:
            return index_name.replace("<NAMESPACE>", "")
        return index_name.replace("<NAMESPACE>", f".{ns}")

    def obfuscated_elasticsearch(self) -> str:
        """Returns configured elasticsearch url obfuscating the provided password, if any"""
        parsed = urlparse(self.elasticsearch)
        if parsed.password:
            return self.elasticsearch.replace(parsed.password, "XXXX")
        return self.elasticsearch

    class Config:
        # All settings read env vars prefixed with ARGILLA_ (e.g. ARGILLA_NAMESPACE).
        env_prefix = "ARGILLA_"
settings = Settings() |
298,558 | update cache triggered by prereq | from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from badges.models import Badge
from prerequisites.models import Prereq
from prerequisites.tasks import update_conditions_for_quest, update_quest_conditions_all_users, update_quest_conditions_for_user
from quest_manager.models import Quest, QuestSubmission
User = get_user_model()
# @receiver([post_save, post_delete], sender=BadgeAssertion)
# def update_cache_triggered_by_badge_assertion(sender, instance, *args, **kwargs):
# """ When a user earns a badge, recalculate what quests are available to them.
# """
# BadgeAssertion.badge
# update_quest_conditions_for_user.apply_async(args=[instance.user_id], queue='default')
@receiver([post_save, post_delete], dispatch_uid="prerequisites.signals.update_cache_triggered_by_task_completion")
def update_cache_triggered_by_task_completion(sender, instance, *args, **kwargs):
    """Recalculate a user's available quests when they complete a task
    (badge earned, quest submission approved/rejected, or course joined).
    """
    # Starting a quest only creates an in-progress QuestSubmission; nothing was
    # completed yet, so recomputing the quest cache would be wasted work.
    newly_started_submission = isinstance(instance, QuestSubmission) and kwargs.get('created') is True
    if newly_started_submission:
        return
    # TODO Since the cache is only for quests (as prereq parent object), only need to send for affected quests, not ALL quests?
    if sender.__name__ in ('BadgeAssertion', 'QuestSubmission', 'CourseStudent'):
        update_quest_conditions_for_user.apply_async(args=[instance.user_id], queue='default')
# post_delete isn't needed here: it doesn't affect the result, and the cache will be refreshed by the next full conditions update
# @receiver([post_save], sender=Quest)
# def update_conditions_met_for_quest(sender, instance, *args, **kwargs):
# update_conditions_for_quest.apply_async(kwargs={'quest_id': instance.id, 'start_from_user_id': 1}, queue='default')
# @receiver([post_save, post_delete], sender=Prereq)
@receiver([post_save, post_delete], sender=Badge, dispatch_uid="prerequisites.signals.update_conditions_met")
def update_conditions_met(sender, instance, *args, **kwargs):
    # A Badge was saved or deleted: recompute quest conditions for all users.
    # The countdown delays execution so rapid consecutive edits coalesce into
    # fewer full recalculations.
    update_quest_conditions_all_users.apply_async(args=[1], queue='default', countdown=settings.CONDITIONS_UPDATE_COUNTDOWN)
@receiver([post_save], sender=Quest, dispatch_uid="prerequisites.signals.update_cache_triggered_by_quest_without_prereqs")
def update_cache_triggered_by_quest_without_prereqs(sender, instance, *args, **kwargs):
    """
    Handle a specific case where available quests is not updated if the Quest does not contain any prerequisites
    """
    # A quest with no prereqs is available to everyone immediately, so refresh
    # every user's condition cache (delayed so rapid saves coalesce).
    if not instance.prereqs().exists():
        update_quest_conditions_all_users.apply_async(args=[1], queue='default', countdown=settings.CONDITIONS_UPDATE_COUNTDOWN)
@receiver([post_save, post_delete], sender=Prereq, dispatch_uid="prerequisites.signals.update_cache_triggered_by_prereq")
def METHOD_NAME(sender, instance, *args, **kwargs):
    """ Update the cache of available quests (PreqAllConditionsMet) for relevant users when Prereq objects are changed,
    If the parent of the Prereq object is a quest. (i.e a quest's prereqs were changed)
    """
    if instance.parent_content_type.model == 'quest':
        # Deleting the parent object itself may have cascaded to delete this Prereq,
        # so the parent might no longer exist; that case is covered by a post_delete
        # signal receiver for Quest objects.
        if instance.parent_object:
            update_conditions_for_quest.apply_async(kwargs={'quest_id': instance.parent_object.id, 'start_from_user_id': 1}, queue='default')
298,559 | get machine group output | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetMachineGroupResult',
'AwaitableGetMachineGroupResult',
'get_machine_group',
'get_machine_group_output',
]
@pulumi.output_type
class GetMachineGroupResult:
    """
    A user-defined logical grouping of machines.
    """
    def __init__(__self__, count=None, display_name=None, etag=None, group_type=None, id=None, kind=None, machines=None, name=None, type=None):
        # Generated constructor: each argument is type-checked when provided,
        # then stored via pulumi.set so the @pulumi.output_type getters below
        # can resolve it.
        if count and not isinstance(count, int):
            raise TypeError("Expected argument 'count' to be a int")
        pulumi.set(__self__, "count", count)
        if display_name and not isinstance(display_name, str):
            raise TypeError("Expected argument 'display_name' to be a str")
        pulumi.set(__self__, "display_name", display_name)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if group_type and not isinstance(group_type, str):
            raise TypeError("Expected argument 'group_type' to be a str")
        pulumi.set(__self__, "group_type", group_type)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if machines and not isinstance(machines, list):
            raise TypeError("Expected argument 'machines' to be a list")
        pulumi.set(__self__, "machines", machines)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def count(self) -> Optional[int]:
        """
        Count of machines in this group. The value of count may be bigger than the number of machines in case of the group has been truncated due to exceeding the max number of machines a group can handle.
        """
        return pulumi.get(self, "count")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """
        User defined name for the group
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        Resource ETAG.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="groupType")
    def group_type(self) -> Optional[str]:
        """
        Type of the machine group
        """
        return pulumi.get(self, "group_type")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource identifier.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def kind(self) -> str:
        """
        Additional resource type qualifier.
        Expected value is 'machineGroup'.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def machines(self) -> Optional[Sequence['outputs.MachineReferenceWithHintsResponse']]:
        """
        References of the machines in this group. The hints within each reference do not represent the current value of the corresponding fields. They are a snapshot created during the last time the machine group was updated.
        """
        return pulumi.get(self, "machines")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetMachineGroupResult(GetMachineGroupResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Make the already-resolved result awaitable: the unreachable `yield`
        # turns this into a generator, and the return value becomes the result
        # of `await`.
        if False:
            yield self
        return GetMachineGroupResult(
            count=self.count,
            display_name=self.display_name,
            etag=self.etag,
            group_type=self.group_type,
            id=self.id,
            kind=self.kind,
            machines=self.machines,
            name=self.name,
            type=self.type)
def get_machine_group(end_time: Optional[str] = None,
                      machine_group_name: Optional[str] = None,
                      resource_group_name: Optional[str] = None,
                      start_time: Optional[str] = None,
                      workspace_name: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMachineGroupResult:
    """
    Returns the specified machine group as it existed during the specified time interval.
    Azure REST API version: 2015-11-01-preview.
    :param str end_time: UTC date and time specifying the end time of an interval. When not specified the service uses DateTime.UtcNow
    :param str machine_group_name: Machine Group resource name.
    :param str resource_group_name: Resource group name within the specified subscriptionId.
    :param str start_time: UTC date and time specifying the start time of an interval. When not specified the service uses DateTime.UtcNow - 10m
    :param str workspace_name: OMS workspace containing the resources of interest.
    """
    # Wire-format argument names expected by the provider.
    __args__ = {
        'endTime': end_time,
        'machineGroupName': machine_group_name,
        'resourceGroupName': resource_group_name,
        'startTime': start_time,
        'workspaceName': workspace_name,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:operationalinsights:getMachineGroup', __args__, opts=opts, typ=GetMachineGroupResult).value
    # Re-wrap the invoke result into the awaitable variant, field by field.
    field_names = ('count', 'display_name', 'etag', 'group_type', 'id', 'kind',
                   'machines', 'name', 'type')
    return AwaitableGetMachineGroupResult(
        **{field: pulumi.get(__ret__, field) for field in field_names})
@_utilities.lift_output_func(get_machine_group)
def METHOD_NAME(end_time: Optional[pulumi.Input[Optional[str]]] = None,
                machine_group_name: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                start_time: Optional[pulumi.Input[Optional[str]]] = None,
                workspace_name: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMachineGroupResult]:
    """
    Returns the specified machine group as it existed during the specified time interval.
    Azure REST API version: 2015-11-01-preview.
    :param str end_time: UTC date and time specifying the end time of an interval. When not specified the service uses DateTime.UtcNow
    :param str machine_group_name: Machine Group resource name.
    :param str resource_group_name: Resource group name within the specified subscriptionId.
    :param str start_time: UTC date and time specifying the start time of an interval. When not specified the service uses DateTime.UtcNow - 10m
    :param str workspace_name: OMS workspace containing the resources of interest.
    """
    # Body intentionally empty: @_utilities.lift_output_func wraps
    # get_machine_group so it accepts pulumi Inputs and returns an Output.
    ...
298,560 | register default adapters | """
Adapers for JSON types.
"""
# Copyright (C) 2020 The Psycopg Team
import json
from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
from .. import abc
from .. import _oids
from .. import errors as e
from ..pq import Format
from ..adapt import Buffer, Dumper, Loader, PyFormat, AdaptersMap
from ..errors import DataError
JsonDumpsFunction = Callable[[Any], Union[str, bytes]]
JsonLoadsFunction = Callable[[Union[str, bytes]], Any]
def set_json_dumps(
    dumps: JsonDumpsFunction, context: Optional[abc.AdaptContext] = None
) -> None:
    """
    Set the JSON serialisation function to store JSON objects in the database.
    :param dumps: The dump function to use.
    :type dumps: `!Callable[[Any], str]`
    :param context: Where to use the `!dumps` function. If not specified, use it
        globally.
    :type context: `~psycopg.Connection` or `~psycopg.Cursor`
    By default dumping JSON uses the builtin `json.dumps`. You can override
    it to use a different JSON library or to use customised arguments.
    If the `Json` wrapper specified a `!dumps` function, use it in precedence
    of the one set by this function.
    """
    if context is None:
        # If changing load function globally, just change the default on the
        # global class
        _JsonDumper._dumps = dumps
    else:
        adapters = context.adapters
        # If the scope is smaller than global, create subclassess and register
        # them in the appropriate scope.
        grid = [
            (Json, PyFormat.BINARY),
            (Json, PyFormat.TEXT),
            (Jsonb, PyFormat.BINARY),
            (Jsonb, PyFormat.TEXT),
        ]
        dumper: Type[_JsonDumper]
        for wrapper, format in grid:
            # Subclass whatever dumper is currently registered for this
            # wrapper/format pair, overriding only its _dumps function.
            base = _get_current_dumper(adapters, wrapper, format)
            name = base.__name__
            # Avoid stacking "Custom" prefixes when this is called repeatedly.
            if not base.__name__.startswith("Custom"):
                name = f"Custom{name}"
            dumper = type(name, (base,), {"_dumps": dumps})
            adapters.register_dumper(wrapper, dumper)
def set_json_loads(
    loads: JsonLoadsFunction, context: Optional[abc.AdaptContext] = None
) -> None:
    """
    Set the JSON parsing function to fetch JSON objects from the database.
    :param loads: The load function to use.
    :type loads: `!Callable[[bytes], Any]`
    :param context: Where to use the `!loads` function. If not specified, use
        it globally.
    :type context: `~psycopg.Connection` or `~psycopg.Cursor`
    By default loading JSON uses the builtin `json.loads`. You can override
    it to use a different JSON library or to use customised arguments.
    """
    if context is None:
        # If changing load function globally, just change the default on the
        # global class
        _JsonLoader._loads = loads
    else:
        # If the scope is smaller than global, create subclassess and register
        # them in the appropriate scope.
        grid = [
            ("json", JsonLoader),
            ("json", JsonBinaryLoader),
            ("jsonb", JsonbLoader),
            ("jsonb", JsonbBinaryLoader),
        ]
        loader: Type[_JsonLoader]
        for tname, base in grid:
            # Subclass each stock loader, overriding only its _loads function.
            loader = type(f"Custom{base.__name__}", (base,), {"_loads": loads})
            context.adapters.register_loader(tname, loader)
class _JsonWrapper:
    """Pair a Python object with an optional per-value dumps() override."""
    __slots__ = ("obj", "dumps")
    def __init__(self, obj: Any, dumps: Optional[JsonDumpsFunction] = None):
        self.obj = obj
        self.dumps = dumps
    def __repr__(self) -> str:
        # Truncate long reprs so logging a wrapper stays readable.
        shown = repr(self.obj)
        if len(shown) > 40:
            shown = f"{shown[:35]} ... ({len(shown)} chars)"
        return f"{type(self).__name__}({shown})"
class Json(_JsonWrapper):
    # Wrap an object to have it stored as PostgreSQL `json`.
    __slots__ = ()
class Jsonb(_JsonWrapper):
    # Wrap an object to have it stored as PostgreSQL `jsonb`.
    __slots__ = ()
class _JsonDumper(Dumper):
    """Base dumper turning a (possibly wrapped) Python object into JSON bytes."""
    # The globally used JSON dumps() function. It can be changed globally (by
    # set_json_dumps) or by a subclass.
    _dumps: JsonDumpsFunction = json.dumps
    def __init__(self, cls: type, context: Optional[abc.AdaptContext] = None):
        super().__init__(cls, context)
        self.dumps = self.__class__._dumps
    def dump(self, obj: Any) -> bytes:
        # A wrapper's own dumps() takes precedence over the configured one.
        serialize = self.dumps
        if isinstance(obj, _JsonWrapper):
            serialize = obj.dumps or serialize
            obj = obj.obj
        data = serialize(obj)
        return data.encode() if isinstance(data, str) else data
class JsonDumper(_JsonDumper):
    # Text-format dumper for the json type.
    oid = _oids.JSON_OID
class JsonBinaryDumper(_JsonDumper):
    # Binary-format dumper for the json type (payload identical to text).
    format = Format.BINARY
    oid = _oids.JSON_OID
class JsonbDumper(_JsonDumper):
    # Text-format dumper for the jsonb type.
    oid = _oids.JSONB_OID
class JsonbBinaryDumper(_JsonDumper):
    # Binary-format dumper for the jsonb type: the payload is a version byte
    # (currently 1) followed by the JSON text.
    format = Format.BINARY
    oid = _oids.JSONB_OID
    def dump(self, obj: Any) -> bytes:
        return b"\x01" + super().dump(obj)
class _JsonLoader(Loader):
    """Base loader parsing JSON data from the database into Python objects."""
    # The globally used JSON loads() function. It can be changed globally (by
    # set_json_loads) or by a subclass.
    _loads: JsonLoadsFunction = json.loads
    def __init__(self, oid: int, context: Optional[abc.AdaptContext] = None):
        super().__init__(oid, context)
        self.loads = self.__class__._loads
    def load(self, data: Buffer) -> Any:
        # json.loads() cannot work on memoryview.
        raw = data if isinstance(data, bytes) else bytes(data)
        return self.loads(raw)
class JsonLoader(_JsonLoader):
    # Text-format loader for the json type.
    pass
class JsonbLoader(_JsonLoader):
    # Text-format loader for the jsonb type.
    pass
class JsonBinaryLoader(_JsonLoader):
    # Binary-format loader for the json type (payload identical to text).
    format = Format.BINARY
class JsonbBinaryLoader(_JsonLoader):
    # Binary-format loader for the jsonb type: a version byte (must be 1)
    # followed by the JSON text.
    format = Format.BINARY
    def load(self, data: Buffer) -> Any:
        if data and data[0] != 1:
            # Fix: the message was missing the f-string prefix, so it printed
            # the literal text "{data[0]}" instead of the offending byte.
            raise DataError(f"unknown jsonb binary format: {data[0]}")
        # Strip the version byte before parsing.
        data = data[1:]
        if not isinstance(data, bytes):
            data = bytes(data)
        return self.loads(data)
def _get_current_dumper(
    adapters: AdaptersMap, cls: type, format: PyFormat
) -> Type[abc.Dumper]:
    """Return the dumper currently registered for *cls*/*format*, falling back
    to the stock default when none has been registered yet."""
    try:
        return adapters.get_dumper(cls, format)
    except e.ProgrammingError:
        return _default_dumpers[cls, format]
# Fallback dumpers used by _get_current_dumper when nothing is registered yet.
# Fix: (Jsonb, TEXT) previously mapped to JsonDumper, which carries the json
# OID; the jsonb wrapper must fall back to JsonbDumper (jsonb OID).
_default_dumpers: Dict[Tuple[Type[_JsonWrapper], PyFormat], Type[Dumper]] = {
    (Json, PyFormat.BINARY): JsonBinaryDumper,
    (Json, PyFormat.TEXT): JsonDumper,
    (Jsonb, PyFormat.BINARY): JsonbBinaryDumper,
    (Jsonb, PyFormat.TEXT): JsonbDumper,
}
def METHOD_NAME(context: abc.AdaptContext) -> None:
    """Register the stock JSON dumpers and loaders on *context*."""
    adapters = context.adapters
    # Currently json binary format is nothing different than text, maybe with
    # an extra memcopy we can avoid.
    adapters.register_dumper(Json, JsonBinaryDumper)
    adapters.register_dumper(Json, JsonDumper)
    adapters.register_dumper(Jsonb, JsonbBinaryDumper)
    adapters.register_dumper(Jsonb, JsonbDumper)
    adapters.register_loader("json", JsonLoader)
    adapters.register_loader("jsonb", JsonbLoader)
    adapters.register_loader("json", JsonBinaryLoader)
    adapters.register_loader("jsonb", JsonbBinaryLoader)
298,561 | test seh | # Windows specific tests
from ctypes import *
import unittest, sys
from test import support
import _ctypes_test
@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
class FunctionCallTestCase(unittest.TestCase):
    """Tests for calling Windows DLL functions through ctypes."""
    @unittest.skipUnless('MSC' in sys.version, "SEH only supported by MSC")
    @unittest.skipIf(sys.executable.lower().endswith('_d.exe'),
                     "SEH not enabled in debug builds")
    def METHOD_NAME(self):
        """Structured Exception Handling: an access violation inside a DLL call
        must surface as a Python OSError, not crash the process."""
        # Disable faulthandler to prevent logging the warning:
        # "Windows fatal exception: access violation"
        with support.disable_faulthandler():
            # Call functions with invalid arguments, and make sure
            # that access violations are trapped and raise an
            # exception.
            self.assertRaises(OSError, windll.kernel32.GetModuleHandleA, 32)
    def test_noargs(self):
        # This is a special case on win32 x64
        windll.user32.GetDesktopWindow()
@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
class ReturnStructSizesTestCase(unittest.TestCase):
    """Structs of 1..10 char fields must be returned by value correctly."""
    def test_sizes(self):
        dll = CDLL(_ctypes_test.__file__)
        for size in range(1, 11):
            fields = [(f"f{n}", c_char) for n in range(1, size + 1)]
            class S(Structure):
                _fields_ = fields
            func = getattr(dll, f"TestSize{size}")
            func.restype = S
            result = func()
            # The C side fills fields with 'a', 'b', 'c', ...
            for index, (fname, _ftype) in enumerate(fields):
                self.assertEqual(getattr(result, fname),
                                 bytes([ord('a') + index]))
@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
class TestWintypes(unittest.TestCase):
    """Sanity checks for ctypes.wintypes aliases and the COMError exception."""
    def test_HWND(self):
        from ctypes import wintypes
        # HWND is a handle, i.e. pointer-sized.
        self.assertEqual(sizeof(wintypes.HWND), sizeof(c_void_p))
    def test_PARAM(self):
        from ctypes import wintypes
        # WPARAM and LPARAM are pointer-sized on both win32 and win64.
        self.assertEqual(sizeof(wintypes.WPARAM),
                         sizeof(c_void_p))
        self.assertEqual(sizeof(wintypes.LPARAM),
                         sizeof(c_void_p))
    def test_COMError(self):
        from _ctypes import COMError
        if support.HAVE_DOCSTRINGS:
            self.assertEqual(COMError.__doc__,
                             "Raised when a COM method call failed.")
        ex = COMError(-1, "text", ("details",))
        self.assertEqual(ex.hresult, -1)
        self.assertEqual(ex.text, "text")
        self.assertEqual(ex.details, ("details",))
@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
class TestWinError(unittest.TestCase):
    """WinError() must map Windows error codes to OSError with correct attributes."""
    def test_winerror(self):
        # see Issue 16169
        import errno
        ERROR_INVALID_PARAMETER = 87
        msg = FormatError(ERROR_INVALID_PARAMETER).strip()
        args = (errno.EINVAL, msg, None, ERROR_INVALID_PARAMETER)
        # Explicit error code passed in.
        e = WinError(ERROR_INVALID_PARAMETER)
        self.assertEqual(e.args, args)
        self.assertEqual(e.errno, errno.EINVAL)
        self.assertEqual(e.winerror, ERROR_INVALID_PARAMETER)
        # No argument: WinError() should pick up GetLastError().
        windll.kernel32.SetLastError(ERROR_INVALID_PARAMETER)
        try:
            raise WinError()
        except OSError as exc:
            e = exc
        self.assertEqual(e.args, args)
        self.assertEqual(e.errno, errno.EINVAL)
        self.assertEqual(e.winerror, ERROR_INVALID_PARAMETER)
class Structures(unittest.TestCase):
    """Passing and returning structs by value through a C test DLL."""
    def test_struct_by_value(self):
        class POINT(Structure):
            _fields_ = [("x", c_long),
                        ("y", c_long)]
        class RECT(Structure):
            _fields_ = [("left", c_long),
                        ("top", c_long),
                        ("right", c_long),
                        ("bottom", c_long)]
        dll = CDLL(_ctypes_test.__file__)
        pt = POINT(15, 25)
        # Read the rectangle bounds exported as globals by the test DLL.
        left = c_long.in_dll(dll, 'left')
        top = c_long.in_dll(dll, 'top')
        right = c_long.in_dll(dll, 'right')
        bottom = c_long.in_dll(dll, 'bottom')
        rect = RECT(left, top, right, bottom)
        PointInRect = dll.PointInRect
        PointInRect.argtypes = [POINTER(RECT), POINT]
        self.assertEqual(1, PointInRect(byref(rect), pt))
        ReturnRect = dll.ReturnRect
        ReturnRect.argtypes = [c_int, RECT, POINTER(RECT), POINT, RECT,
                               POINTER(RECT), POINT, RECT]
        ReturnRect.restype = RECT
        for i in range(4):
            ret = ReturnRect(i, rect, pointer(rect), pt, rect,
                             byref(rect), pt, rect)
            # the c function will check and modify ret if something is
            # passed in improperly
            self.assertEqual(ret.left, left.value)
            self.assertEqual(ret.right, right.value)
            self.assertEqual(ret.top, top.value)
            self.assertEqual(ret.bottom, bottom.value)
        # to not leak references, we must clean _pointer_type_cache
        from ctypes import _pointer_type_cache
        del _pointer_type_cache[RECT]
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
298,562 | id | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetArtifactSourceResult',
'AwaitableGetArtifactSourceResult',
'get_artifact_source',
'get_artifact_source_output',
]
@pulumi.output_type
class GetArtifactSourceResult:
    """
    The resource that defines the source location where the artifacts are located.
    """
    def __init__(__self__, artifact_root=None, authentication=None, METHOD_NAME=None, location=None, name=None, source_type=None, tags=None, type=None):
        # Generated constructor: each argument is type-checked when provided,
        # then stored via pulumi.set so the @pulumi.output_type getters below
        # can resolve it.
        if artifact_root and not isinstance(artifact_root, str):
            raise TypeError("Expected argument 'artifact_root' to be a str")
        pulumi.set(__self__, "artifact_root", artifact_root)
        if authentication and not isinstance(authentication, dict):
            raise TypeError("Expected argument 'authentication' to be a dict")
        pulumi.set(__self__, "authentication", authentication)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", METHOD_NAME)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if source_type and not isinstance(source_type, str):
            raise TypeError("Expected argument 'source_type' to be a str")
        pulumi.set(__self__, "source_type", source_type)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="artifactRoot")
    def artifact_root(self) -> Optional[str]:
        """
        The path from the location that the 'authentication' property [say, a SAS URI to the blob container] refers to, to the location of the artifacts. This can be used to differentiate different versions of the artifacts. Or, different types of artifacts like binaries or templates. The location referenced by the authentication property concatenated with this optional artifactRoot path forms the artifact source location where the artifacts are expected to be found.
        """
        return pulumi.get(self, "artifact_root")
    @property
    @pulumi.getter
    def authentication(self) -> 'outputs.SasAuthenticationResponse':
        """
        The authentication method to use to access the artifact source.
        """
        return pulumi.get(self, "authentication")
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="sourceType")
    def source_type(self) -> str:
        """
        The type of artifact source used.
        """
        return pulumi.get(self, "source_type")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetArtifactSourceResult(GetArtifactSourceResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Make the already-resolved result awaitable: the unreachable `yield`
        # turns this into a generator, and the return value becomes the result
        # of `await`.
        if False:
            yield self
        return GetArtifactSourceResult(
            artifact_root=self.artifact_root,
            authentication=self.authentication,
            METHOD_NAME=self.METHOD_NAME,
            location=self.location,
            name=self.name,
            source_type=self.source_type,
            tags=self.tags,
            type=self.type)
def get_artifact_source(artifact_source_name: Optional[str] = None,
                        resource_group_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetArtifactSourceResult:
    """
    The resource that defines the source location where the artifacts are located.
    :param str artifact_source_name: The name of the artifact source.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Wire-format argument names expected by the provider.
    __args__ = {
        'artifactSourceName': artifact_source_name,
        'resourceGroupName': resource_group_name,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:deploymentmanager/v20191101preview:getArtifactSource', __args__, opts=opts, typ=GetArtifactSourceResult).value
    # Re-wrap the invoke result into the awaitable variant; note the 'id'
    # value is passed via the METHOD_NAME keyword.
    field_map = {
        'artifact_root': 'artifact_root',
        'authentication': 'authentication',
        'METHOD_NAME': 'id',
        'location': 'location',
        'name': 'name',
        'source_type': 'source_type',
        'tags': 'tags',
        'type': 'type',
    }
    return AwaitableGetArtifactSourceResult(
        **{kw: pulumi.get(__ret__, key) for kw, key in field_map.items()})
def get_artifact_source_output(artifact_source_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetArtifactSourceResult]:
"""
The resource that defines the source location where the artifacts are located.
:param str artifact_source_name: The name of the artifact source.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
... |
298,563 | install step | ##
# Copyright 2009-2023 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Go packages, implemented as an EasyBlock
@author: Pavel Grochal (INUITS)
"""
import os
from distutils.version import LooseVersion
import easybuild.tools.environment as env
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
class GoPackage(EasyBlock):
"""Builds and installs a Go package, and provides a dedicated module file."""
@staticmethod
def extra_options(extra_vars=None):
"""Easyconfig parameters specific to Go packages."""
if extra_vars is None:
extra_vars = {}
extra_vars.update({
'modulename': [None, "Module name of the Go package, when building non-native module", CUSTOM],
'forced_deps': [None, "Force specific version of Go package, when building non-native module", CUSTOM],
})
return extra_vars
def prepare_step(self, *args, **kwargs):
"""Go-specific preparations."""
super(GoPackage, self).prepare_step(*args, **kwargs)
if get_software_root('Go') is None:
raise EasyBuildError("Failed to pick go command to use. Is it listed in dependencies?")
if LooseVersion(get_software_version('Go')) < LooseVersion("1.11"):
raise EasyBuildError("Go version < 1.11 doesn't support installing modules from go.mod")
def configure_step(self):
"""Configure Go package build/install."""
# enforce use of go modules
env.setvar('GO111MODULE', 'on', verbose=False)
# set bin folder
env.setvar('GOBIN', os.path.join(self.installdir, 'bin'), verbose=False)
# creates log entries for go being used, for debugging
run_cmd("go version", verbose=False, trace=False)
run_cmd("go env", verbose=False, trace=False)
def build_step(self):
"""If Go package is not native go module, lets try to make the module."""
go_mod_file = 'go.mod'
go_sum_file = 'go.sum'
if not os.path.exists(go_mod_file) or not os.path.isfile(go_mod_file):
self.log.warn("go.mod not found! This is not natively supported go module. Trying to init module.")
if self.cfg['modulename'] is None:
raise EasyBuildError("Installing non-native go module. You need to specify 'modulename' in easyconfig")
# for more information about migrating to go modules
# see: https://blog.golang.org/migrating-to-go-modules
# go mod init
cmd = ' '.join(['go', 'mod', 'init', self.cfg['modulename']])
run_cmd(cmd, log_all=True, simple=True)
if self.cfg['forced_deps']:
for dep in self.cfg['forced_deps']:
# go get specific dependencies which locks them in go.mod
cmd = ' '.join(['go', 'get', '%s@%s' % dep])
run_cmd(cmd, log_all=True, simple=True)
# note: ... (tripledot) used below is not a typo, but go wildcard pattern
# which means: anything you can find in this directory, including all subdirectories
# see: 'go help packages' or https://golang.org/pkg/cmd/go/internal/help/
# see: https://stackoverflow.com/a/28031651/2047157
# building and testing will add packages to go.mod
run_cmd('go build ./...', log_all=True, simple=True)
run_cmd('go test ./...', log_all=True, simple=True)
# tidy up go.mod
run_cmd('go mod tidy', log_all=True, simple=True)
# build and test again, to ensure go mod tidy didn't removed anything needed
run_cmd('go build ./...', log_all=True, simple=True)
run_cmd('go test ./...', log_all=True, simple=True)
self.log.warn('Include generated go.mod and go.sum via patch to ensure locked dependencies '
'and run this easyconfig again.')
run_cmd('cat go.mod', log_all=True, simple=True)
run_cmd('cat go.sum', log_all=True, simple=True)
if not os.path.exists(go_sum_file) or not os.path.isfile(go_sum_file):
raise EasyBuildError("go.sum not found! This module has no locked dependency versions.")
def METHOD_NAME(self):
"""Install Go package to a custom path"""
# actually install Go package
cmd = ' '.join([
self.cfg['preinstallopts'],
'go',
'install',
# print commands as they are executed,
# including downloading and installing of package deps as listed in the go.mod file
'-x',
self.cfg['installopts'],
])
run_cmd(cmd, log_all=True, log_ok=True, simple=True)
def sanity_check_step(self):
"""Custom sanity check for Go package."""
# Check if GoPackage produced binary and can run help on it
custom_paths = {
'files': ['bin/%s' % self.name.lower()],
'dirs': [],
}
custom_commands = ['%s --help' % self.name.lower()]
super(GoPackage, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)
    def sanity_check_rpath(self, rpath_dirs=None):
        """RPATH sanity check with the readelf RPATH check disabled.

        NOTE(review): presumably disabled because Go-built binaries are not
        expected to carry a conventional RPATH — confirm against the base
        easyblock's sanity_check_rpath documentation.
        """
        super(GoPackage, self).sanity_check_rpath(rpath_dirs=rpath_dirs, check_readelf_rpath=False)
298,564 | test group instances by specs | # Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implments compute classes for EC2."""
import os
import unittest
import warnings
from typing import Sized, cast
from unittest.mock import MagicMock
from .cluster import (
Cluster,
InstanceMismatch,
create_instances,
group_instances_by_specs,
)
from .compute.ec2_adapter import EC2Adapter
from .instance import Instance
IMAGE_ID = "ami-0370b0294d7241341"
KEY_NAME = "flower"
SSH_CREDENTIALS = ("ubuntu", "/Users/tanto/.ssh/flower.pem")
SUBNET_ID = "subnet-23da286f"
SECURITY_GROUP_IDS = ["sg-0dd0f0080bcf86400"]
class CreateInstancesTestCase(unittest.TestCase):
    """Test cases for create_instances."""
    def setUp(self) -> None:
        """Prepare tests."""
        # Stub adapter: create_instances returns one tuple per machine as
        # (instance_id, private_ip, public_ip, ssh_port, state)
        self.mock_adapter = MagicMock()
        self.mock_adapter.create_instances.return_value = [
            (1, "1.1.1.1", "2.2.2.1", 22, "running"),
            (2, "1.1.1.2", "2.2.2.2", 22, "running"),
        ]
        self.timeout = 10
    def test_create_instances(self) -> None:
        """Test if create_instances works correctly."""
        # Prepare: two instances with identical specs (required by create_instances)
        instances = [
            Instance(name="client_0", group="clients", num_cpu=2, num_ram=8),
            Instance(name="client_1", group="clients", num_cpu=2, num_ram=8),
        ]
        # Execute
        create_instances(
            adapter=self.mock_adapter, instances=instances, timeout=self.timeout
        )
        # Assert: a single adapter call covers the whole homogeneous batch ...
        self.mock_adapter.create_instances.assert_called_once_with(
            num_cpu=instances[0].num_cpu,
            num_ram=instances[0].num_ram,
            num_instance=len(instances),
            timeout=10,
            gpu=False,
        )
        # ... and every Instance was populated from the adapter's return value
        for ins in instances:
            assert ins.instance_id is not None
            assert ins.private_ip is not None
            assert ins.public_ip is not None
            assert ins.ssh_port is not None
            assert ins.state is not None
    def test_create_instances_fail(self) -> None:
        """Test if create_instances fails when instances list is invalid."""
        # Prepare: mismatched CPU/RAM specs within one batch must be rejected
        instances = [
            Instance(name="client_0", group="clients", num_cpu=2, num_ram=8),
            Instance(name="client_1", group="clients", num_cpu=1, num_ram=4),
        ]
        # Execute
        with self.assertRaises(InstanceMismatch):
            create_instances(
                adapter=self.mock_adapter, instances=instances, timeout=self.timeout
            )
def METHOD_NAME() -> None:
    """group_instances_by_specs must bucket instances sharing the same CPU/RAM specs."""
    # Two 2-CPU/4-GB machines and two 2-CPU/8-GB machines, deliberately interleaved
    server = Instance(name="server", group="server", num_cpu=2, num_ram=4)
    client_0 = Instance(name="client_0", group="clients", num_cpu=2, num_ram=8)
    logserver = Instance(name="logserver", group="logserver", num_cpu=2, num_ram=4)
    client_1 = Instance(name="client_1", group="clients", num_cpu=2, num_ram=8)
    # Execute
    groups = group_instances_by_specs([server, client_0, logserver, client_1])
    # Assert: one group per distinct (num_cpu, num_ram) pair, input order preserved
    assert len(groups) == 2
    assert groups == [[server, logserver], [client_0, client_1]]
if os.getenv("FLOWER_INTEGRATION"):
class ClusterIntegrationTestCase(unittest.TestCase):
"""Integration tests class Cluster.
This TestCase will not mock anythin and use a live EC2Adapter
which will be used to provision a single machine and execute a
single command on it. Afterwards the machines will be shut down.
"""
# pylint: disable=too-many-instance-attributes
def setUp(self) -> None:
"""Create an instance."""
# Filter false positive warning
warnings.filterwarnings(
"ignore",
category=ResourceWarning,
message="unclosed.*<ssl.SSLSocket.*>",
)
adapter = EC2Adapter(
image_id=IMAGE_ID,
key_name=KEY_NAME,
subnet_id=SUBNET_ID,
security_group_ids=SECURITY_GROUP_IDS,
tags=[
("Purpose", "integration_test"),
("Test Name", "ClusterIntegrationTestCase"),
],
)
self.cluster = Cluster(
adapter=adapter,
ssh_credentials=SSH_CREDENTIALS,
instances=[
Instance(name="server", group="server", num_cpu=2, num_ram=2)
],
# In case the tearDown fails for some reason the machines
# should automatically terminate after 10 minutes
timeout=10,
)
self.cluster.start()
def tearDown(self) -> None:
self.cluster.terminate()
def test_exec(self) -> None:
"""Execute on all clients."""
# Prepare
command = "nproc"
expected_result = "2\n"
# Execute
stdout, stderr = self.cluster.exec("server", command)
casted_stderr: Sized = cast(Sized, stderr)
casted_stdout: Sized = cast(Sized, stdout)
# Assert
assert len(casted_stderr) == 0
assert len(casted_stdout) == 1
assert "".join(stdout) == expected_result
if __name__ == "__main__":
unittest.main(verbosity=2) |
298,565 | get info | from abc import ABC
from contextlib import contextmanager
from typing import TYPE_CHECKING, Optional
from typing_extensions import TypeGuard
from mcdreforged.permission.permission_level import PermissionLevel
from mcdreforged.translation.translation_text import RTextMCDRTranslation
from mcdreforged.utils import misc_util
from mcdreforged.utils.exception import IllegalCallError
from mcdreforged.utils.types import MessageText
if TYPE_CHECKING:
from mcdreforged.mcdr_server import MCDReforgedServer
from mcdreforged.info_reactor.info import Info
from mcdreforged.plugin.server_interface import ServerInterface
from mcdreforged.plugin.type.plugin import AbstractPlugin
from mcdreforged.preference.preference_manager import PreferenceItem
class CommandSource(ABC):
    """
    :class:`CommandSource`: is an abstracted command executor model. It provides several methods for command execution
    Class inheriting tree::
        CommandSource
        ├── InfoCommandSource
        │   ├── PlayerCommandSource
        │   └── ConsoleCommandSource
        └── PluginCommandSource
    Plugins can declare a class inherited from :class:`CommandSource` to create their own command source
    """
    @property
    def is_player(self) -> TypeGuard['PlayerCommandSource']:
        """
        If the command source is a player command source
        :return: ``True`` if it's a player command source, ``False`` otherwise
        """
        return isinstance(self, PlayerCommandSource)
    @property
    def is_console(self) -> TypeGuard['ConsoleCommandSource']:
        """
        If the command source is a console command source
        :return: ``True`` if it's a console command source, ``False`` otherwise
        """
        return isinstance(self, ConsoleCommandSource)
    def get_server(self) -> 'ServerInterface':
        """
        Return the server interface instance
        """
        # Subclasses must provide access to a concrete server interface
        raise NotImplementedError()
    def get_permission_level(self) -> int:
        """
        Return the permission level that the command source has
        The permission level is represented by int
        """
        raise NotImplementedError()
    def get_preference(self) -> 'PreferenceItem':
        """
        Return the preference of the command source
        By default, the default preference of MCDR from
        :meth:`ServerInterface.get_default_preference() <mcdreforged.plugin.server_interface.ServerInterface.get_preference>`
        will be returned
        Subclasses might override this method to return customized preference.
        e.g. :class:`PlayerCommandSource` will return the preference of the corresponding player
        .. seealso::
            method :meth:`ServerInterface.get_preference() <mcdreforged.plugin.server_interface.ServerInterface.get_preference>`
        .. versionadded:: v2.1.0
        """
        # Local import, presumably to avoid a circular import at module load
        # time (cf. the TYPE_CHECKING-only imports at the top of this file)
        from mcdreforged.plugin.server_interface import ServerInterface
        server = ServerInterface.get_instance()
        if server is None:
            raise IllegalCallError('Cannot get default preference when MCDR is not running')
        return server.get_default_preference()
    @contextmanager
    def preferred_language_context(self):
        """
        A quick helper method to use the language value in preference to create a context
        with :meth:`RTextMCDRTranslation.language_context() <mcdreforged.translation.translation_text.RTextMCDRTranslation.language_context>`
        .. seealso::
            Class :class:`~mcdreforged.translation.translation_text.RTextMCDRTranslation`
        Example usage::
            with source.preferred_language_context():
                message = source.get_server().rtr('my_plugin.placeholder').to_plain_text()
                text.set_click_event(RAction.suggest_command, message)
        .. versionadded:: v2.1.0
        """
        with RTextMCDRTranslation.language_context(self.get_preference().language):
            yield
    def has_permission(self, level: int) -> bool:
        """
        A helper method for testing permission requirement
        :param level: The permission level to be tested
        :return: If the command source has not less permission level than the given permission level
        """
        return self.get_permission_level() >= level
    def has_permission_higher_than(self, level: int) -> bool:
        """
        Just like the :meth:`CommandSource.has_permission`, but this time it is a greater than judgment
        :param level: The permission level to be tested
        :return: If the command source has greater permission level than the given permission level
        """
        return self.get_permission_level() > level
    def reply(self, message: MessageText, **kwargs) -> None:
        """
        Send a message to the command source. The message can be anything including RTexts
        :param message: The message you want to send
        :keyword encoding: The encoding method for the message. It's only used in :class:`PlayerCommandSource`
        :keyword console_text: Message override when it's a :class:`ConsoleCommandSource`
        """
        raise NotImplementedError()
class InfoCommandSource(CommandSource, ABC):
    """
    Command source originated from an info created by MCDR
    """
    def __init__(self, mcdr_server: 'MCDReforgedServer', info: 'Info'):
        self._mcdr_server = mcdr_server
        # Name-mangled private; subclasses access it via METHOD_NAME()
        self.__info = info
    def METHOD_NAME(self) -> 'Info':
        """
        Return the Info instance that this command source is created from
        """
        return self.__info
    def get_server(self) -> 'ServerInterface':
        return self._mcdr_server.basic_server_interface
    def get_permission_level(self) -> int:
        # Resolved by MCDR's permission manager for this concrete source
        return self._mcdr_server.permission_manager.get_permission(self)
    def __str__(self):
        # Still abstract: player / console subclasses provide representations
        raise NotImplementedError()
    def __repr__(self):
        raise NotImplementedError()
class PlayerCommandSource(InfoCommandSource):
    """Command source for a command entered by an in-game player."""
    def __init__(self, mcdr_server, info, player: str):
        # A player source can only be constructed from info parsed out of server output
        if not info.is_from_server:
            raise TypeError('{} should be built from server info'.format(self.__class__.__name__))
        super().__init__(mcdr_server, info)
        self.player: str = player
        """The name of the player"""
    def get_preference(self) -> 'PreferenceItem':
        """Return the preference of this specific player."""
        server = self.get_server()
        return server.get_preference(self)
    def reply(self, message: MessageText, *, encoding: Optional[str] = None, **kwargs):
        """
        Send *message* to this player via the server's ``tell``
        :keyword encoding: encoding method to be used in :meth:`ServerInterface.tell`
        """
        self._mcdr_server.basic_server_interface.tell(self.player, message, encoding=encoding)
    def __str__(self):
        return f'Player {self.player}'
    def __repr__(self):
        return f'{type(self).__name__}[player={self.player},info_id={self.METHOD_NAME().id}]'
class ConsoleCommandSource(InfoCommandSource):
    """Command source for a command typed into the MCDR console."""
    def __init__(self, mcdr_server, info):
        # A console source can only be constructed from info that came from the console
        if not info.is_from_console:
            raise TypeError('{} should be built from console info'.format(self.__class__.__name__))
        super().__init__(mcdr_server, info)
    def get_preference(self) -> 'PreferenceItem':
        """Return the preference bound to the console."""
        server = self.get_server()
        return server.get_preference(self)
    def reply(self, message: MessageText, *, console_text: Optional[MessageText] = None, **kwargs):
        """
        Print *message* to the console, honoring the console's preferred language
        :keyword console_text: If it's specified, overwrite the value of parameter ``message`` with it
        """
        text = message if console_text is None else console_text
        with self.preferred_language_context():
            misc_util.print_text_to_console(self._mcdr_server.logger, text)
    def __str__(self):
        return 'Console'
    def __repr__(self):
        return f'{type(self).__name__}[info_id={self.METHOD_NAME().id}]'
class PluginCommandSource(CommandSource):
    """Command source for commands issued programmatically by a plugin."""
    def __init__(self, server: 'ServerInterface', plugin: Optional['AbstractPlugin'] = None):
        self.__server = server.as_basic_server_interface()
        self.__logger = self.__server.logger
        self.__plugin = plugin
    def get_server(self) -> 'ServerInterface':
        return self.__server
    def get_permission_level(self) -> int:
        # Plugins always act with the dedicated plugin permission level
        return PermissionLevel.PLUGIN_LEVEL
    def reply(self, message: MessageText, **kwargs) -> None:
        # A plugin "reply" goes to the console / log
        misc_util.print_text_to_console(self.__logger, message)
    def __str__(self):
        if self.__plugin is None:
            return 'Plugin'
        return 'Plugin {}'.format(self.__plugin)
    def __repr__(self):
        return f'{type(self).__name__}[plugin={self.__plugin}]'
298,566 | get token | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from threading import Lock, Condition, Timer, TIMEOUT_MAX, Event
from datetime import timedelta
from typing import Any
from .utils import get_current_utc_as_int
from .utils import create_access_token
class CommunicationTokenCredential(object):
    """Credential type used for authenticating to an Azure Communication service.
    :param str token: The token used to authenticate to an Azure Communication service.
    :keyword token_refresher: The sync token refresher to provide capacity to fetch a fresh token.
        The returned token must be valid (expiration date must be in the future).
    :paramtype token_refresher: Callable[[], AccessToken]
    :keyword bool proactive_refresh: Whether to refresh the token proactively or not.
        If the proactive refreshing is enabled ('proactive_refresh' is true), the credential will use
        a background thread to attempt to refresh the token within 10 minutes before the cached token expires,
        the proactive refresh will request a new token by calling the 'token_refresher' callback.
        When 'proactive_refresh' is enabled, the Credential object must be either run within a context manager
        or the 'close' method must be called once the object usage has been finished.
    :raises: TypeError if parameter 'token' is not a string
    :raises: ValueError if the 'proactive_refresh' is enabled without providing the 'token_refresher' callable.
    """
    # How close to expiry a token must be before a refresh is attempted,
    # for on-demand (per get-token call) and proactive (background) modes
    _ON_DEMAND_REFRESHING_INTERVAL_MINUTES = 2
    _DEFAULT_AUTOREFRESH_INTERVAL_MINUTES = 10
    def __init__(self, token: str, **kwargs: Any):
        if not isinstance(token, str):
            raise TypeError("Token must be a string.")
        self._token = create_access_token(token)
        self._token_refresher = kwargs.pop("token_refresher", None)
        self._proactive_refresh = kwargs.pop("proactive_refresh", False)
        if self._proactive_refresh and self._token_refresher is None:
            raise ValueError(
                "When 'proactive_refresh' is True, 'token_refresher' must not be None."
            )
        self._timer = None  # background refresh Timer (proactive mode only)
        # Condition guarding _token and _some_thread_refreshing
        self._lock = Condition(Lock())
        self._some_thread_refreshing = False
        self._is_closed = Event()
    def METHOD_NAME(self, *scopes, **kwargs):  # pylint: disable=unused-argument
        # type (*str, **Any) -> AccessToken
        """The value of the configured token.
        :param any scopes: Scopes to be added to the token.
        :return: AccessToken
        :rtype: ~azure.core.credentials.AccessToken
        """
        if self._proactive_refresh and self._is_closed.is_set():
            raise RuntimeError(
                "An instance of CommunicationTokenCredential cannot be reused once it has been closed."
            )
        # Fast path: no refresher configured, or token still comfortably valid
        if not self._token_refresher or not self._is_token_expiring_soon(self._token):
            return self._token
        self._update_token_and_reschedule()
        return self._token
    def _update_token_and_reschedule(self):
        """Refresh the cached token (at most one thread performs the refresh)
        and, in proactive mode, schedule the next background refresh."""
        should_this_thread_refresh = False
        with self._lock:
            while self._is_token_expiring_soon(self._token):
                if self._some_thread_refreshing:
                    # Another thread is refreshing: reuse the current token if
                    # it is still valid, otherwise wait until that thread ends
                    if self._is_token_valid(self._token):
                        return self._token
                    self._wait_till_lock_owner_finishes_refreshing()
                else:
                    # Claim the refresh for this thread
                    should_this_thread_refresh = True
                    self._some_thread_refreshing = True
                    break
        if should_this_thread_refresh:
            try:
                new_token = self._token_refresher()
                if not self._is_token_valid(new_token):
                    raise ValueError(
                        "The token returned from the token_refresher is expired."
                    )
                with self._lock:
                    self._token = new_token
                    self._some_thread_refreshing = False
                    self._lock.notify_all()
            except:
                # Clear the in-progress flag and wake waiters before
                # re-raising, so a failed refresh does not block other threads
                with self._lock:
                    self._some_thread_refreshing = False
                    self._lock.notify_all()
                raise
        if self._proactive_refresh:
            self._schedule_refresh()
        return self._token
    def _schedule_refresh(self):
        """(Proactive mode) arm a daemon Timer for the next refresh."""
        if self._is_closed.is_set():
            return
        if self._timer is not None:
            self._timer.cancel()
        token_ttl = self._token.expires_on - get_current_utc_as_int()
        if self._is_token_expiring_soon(self._token):
            # Schedule the next refresh for when it reaches a certain percentage of the remaining lifetime.
            timespan = token_ttl // 2
        else:
            # Schedule the next refresh for when it gets in to the soon-to-expire window.
            timespan = (
                token_ttl
                - timedelta(
                    minutes=self._DEFAULT_AUTOREFRESH_INTERVAL_MINUTES
                ).total_seconds()
            )
        if timespan <= TIMEOUT_MAX:
            self._timer = Timer(timespan, self._update_token_and_reschedule)
            self._timer.daemon = True
            self._timer.start()
    def _wait_till_lock_owner_finishes_refreshing(self):
        # NOTE(review): caller holds the lock (entered via `with self._lock`);
        # releasing and immediately re-acquiring yields to the refreshing
        # thread. The conventional form would be `self._lock.wait()` paired
        # with the notify_all() above — confirm intent before changing.
        self._lock.release()
        self._lock.acquire()
    def _is_token_expiring_soon(self, token):
        # The refresh window is wider in proactive mode (10 min vs 2 min)
        if self._proactive_refresh:
            interval = timedelta(minutes=self._DEFAULT_AUTOREFRESH_INTERVAL_MINUTES)
        else:
            interval = timedelta(minutes=self._ON_DEMAND_REFRESHING_INTERVAL_MINUTES)
        return (token.expires_on - get_current_utc_as_int()) < interval.total_seconds()
    @classmethod
    def _is_token_valid(cls, token):
        # Valid == not yet expired (both sides are int UTC seconds,
        # cf. get_current_utc_as_int)
        return get_current_utc_as_int() < token.expires_on
    def __enter__(self):
        if self._proactive_refresh:
            if self._is_closed.is_set():
                raise RuntimeError(
                    "An instance of CommunicationTokenCredential cannot be reused once it has been closed."
                )
            self._schedule_refresh()
        return self
    def __exit__(self, *args):
        self.close()
    def close(self) -> None:
        """Cancel any scheduled refresh; the credential cannot be reused afterwards."""
        if self._timer is not None:
            self._timer.cancel()
        self._timer = None
        self._is_closed.set()
298,567 | activate restore | """ParenMatch -- An IDLE extension for parenthesis matching.
When you hit a right paren, the cursor should move briefly to the left
paren. Paren here is used generically; the matching applies to
parentheses, square brackets, and curly braces.
"""
from idlelib.HyperParser import HyperParser
from idlelib.configHandler import idleConf
# Map each closing bracket character to its opening counterpart
_openers = {')':'(',']':'[','}':'{'}
CHECK_DELAY = 100 # milliseconds
class ParenMatch:
    """Highlight matching parentheses
    There are three supported style of paren matching, based loosely
    on the Emacs options. The style is selected based on the
    HILITE_STYLE attribute; it can be changed using the set_style
    method.
    The supported styles are:
    default -- When a right paren is typed, highlight the matching
        left paren for 1/2 sec.
    expression -- When a right paren is typed, highlight the entire
        expression from the left paren to the right paren.
    TODO:
    - extend IDLE with configuration dialog to change options
    - implement rest of Emacs highlight styles (see below)
    - print mismatch warning in IDLE status window
    Note: In Emacs, there are several styles of highlight where the
    matching paren is highlighted whenever the cursor is immediately
    to the right of a right paren. I don't know how to do that in Tk,
    so I haven't bothered.
    """
    menudefs = [
        ('edit', [
            ("Show surrounding parens", "<<flash-paren>>"),
        ])
    ]
    STYLE = idleConf.GetOption('extensions','ParenMatch','style',
            default='expression')
    FLASH_DELAY = idleConf.GetOption('extensions','ParenMatch','flash-delay',
            type='int',default=500)
    HILITE_CONFIG = idleConf.GetHighlight(idleConf.CurrentTheme(),'hilite')
    BELL = idleConf.GetOption('extensions','ParenMatch','bell',
            type='bool',default=1)
    RESTORE_VIRTUAL_EVENT_NAME = "<<parenmatch-check-restore>>"
    # We want the restore event be called before the usual return and
    # backspace events.
    RESTORE_SEQUENCES = ("<KeyPress>", "<ButtonPress>",
                         "<Key-Return>", "<Key-BackSpace>")
    def __init__(self, editwin):
        self.editwin = editwin
        self.text = editwin.text
        # Bind the check-restore event to the function restore_event,
        # so that we can then use activate_restore (which calls event_add)
        # and deactivate_restore (which calls event_delete).
        editwin.text.bind(self.RESTORE_VIRTUAL_EVENT_NAME,
                          self.restore_event)
        # counter invalidates stale timers: only the most recent timer
        # whose count still matches may remove the highlight
        self.counter = 0
        self.is_restore_active = 0
        self.set_style(self.STYLE)
    def METHOD_NAME(self):
        # Arm the restore event on further user input so the highlight
        # is cleaned up as soon as the user types or clicks
        if not self.is_restore_active:
            for seq in self.RESTORE_SEQUENCES:
                self.text.event_add(self.RESTORE_VIRTUAL_EVENT_NAME, seq)
            self.is_restore_active = True
    def deactivate_restore(self):
        # Disarm the restore event (inverse of METHOD_NAME)
        if self.is_restore_active:
            for seq in self.RESTORE_SEQUENCES:
                self.text.event_delete(self.RESTORE_VIRTUAL_EVENT_NAME, seq)
            self.is_restore_active = False
    def set_style(self, style):
        # Dispatch: pick the tag-creation and timeout strategies for the style
        self.STYLE = style
        if style == "default":
            self.create_tag = self.create_tag_default
            self.set_timeout = self.set_timeout_last
        elif style == "expression":
            self.create_tag = self.create_tag_expression
            self.set_timeout = self.set_timeout_none
    def flash_paren_event(self, event):
        # Triggered by the <<flash-paren>> menu event: highlight the brackets
        # surrounding the insert cursor
        indices = (HyperParser(self.editwin, "insert")
                   .get_surrounding_brackets())
        if indices is None:
            self.warn_mismatched()
            return
        self.METHOD_NAME()
        self.create_tag(indices)
        self.set_timeout_last()
    def paren_closed_event(self, event):
        # If it was a shortcut and not really a closing paren, quit.
        closer = self.text.get("insert-1c")
        if closer not in _openers:
            return
        hp = HyperParser(self.editwin, "insert-1c")
        if not hp.is_in_code():
            return
        indices = hp.get_surrounding_brackets(_openers[closer], True)
        if indices is None:
            self.warn_mismatched()
            return
        self.METHOD_NAME()
        self.create_tag(indices)
        self.set_timeout()
    def restore_event(self, event=None):
        # Remove the highlight and disarm further restore events
        self.text.tag_delete("paren")
        self.deactivate_restore()
        self.counter += 1 # disable the last timer, if there is one.
    def handle_restore_timer(self, timer_count):
        # Only act if no newer highlight superseded this timer
        if timer_count == self.counter:
            self.restore_event()
    def warn_mismatched(self):
        if self.BELL:
            self.text.bell()
    # any one of the create_tag_XXX methods can be used depending on
    # the style
    def create_tag_default(self, indices):
        """Highlight the single paren that matches"""
        self.text.tag_add("paren", indices[0])
        self.text.tag_config("paren", self.HILITE_CONFIG)
    def create_tag_expression(self, indices):
        """Highlight the entire expression"""
        # Extend the tag past the closing bracket so it is included
        if self.text.get(indices[1]) in (')', ']', '}'):
            rightindex = indices[1]+"+1c"
        else:
            rightindex = indices[1]
        self.text.tag_add("paren", indices[0], rightindex)
        self.text.tag_config("paren", self.HILITE_CONFIG)
    # any one of the set_timeout_XXX methods can be used depending on
    # the style
    def set_timeout_none(self):
        """Highlight will remain until user input turns it off
        or the insert has moved"""
        # After CHECK_DELAY, call a function which disables the "paren" tag
        # if the event is for the most recent timer and the insert has changed,
        # or schedules another call for itself.
        self.counter += 1
        def callme(callme, self=self, c=self.counter,
                   index=self.text.index("insert")):
            if index != self.text.index("insert"):
                self.handle_restore_timer(c)
            else:
                self.editwin.text_frame.after(CHECK_DELAY, callme, callme)
        self.editwin.text_frame.after(CHECK_DELAY, callme, callme)
    def set_timeout_last(self):
        """The last highlight created will be removed after .5 sec"""
        # associate a counter with an event; only disable the "paren"
        # tag if the event is for the most recent timer.
        self.counter += 1
        self.editwin.text_frame.after(
            self.FLASH_DELAY,
            lambda self=self, c=self.counter: self.handle_restore_timer(c))
# Running this module directly executes the ParenMatch unit tests
if __name__ == '__main__':
    import unittest
    unittest.main('idlelib.idle_test.test_parenmatch', verbosity=2)
298,568 | check end game | """
Team Deathmatch game mode.
Options
^^^^^^^
.. code-block:: toml
[tdm]
# Maximum kills to win the game
kill_limit = 100
# How many points you will get by intel capture
intel_points = 10
# Hide intel from the map and disable the captures
remove_intel = false
# Use intel scores as a percentage to win the game
# This can cause trouble if remove_intel is false
score_percentage = false
..Maintainer: Triplefox
"""
from pyspades.constants import *
from pyspades.contained import IntelCapture
from piqueserver.config import config
from piqueserver.commands import command
import math
TDM_CONFIG = config.section("tdm")
KILL_LIMIT = TDM_CONFIG.option("kill_limit", default=100)
INTEL_POINTS = TDM_CONFIG.option("intel_points", default=10)
REMOVE_INTEL = TDM_CONFIG.option("remove_intel", default=False)
SCORE_PERCENTAGE = TDM_CONFIG.option("score_percentage", default=False)
HIDE_COORD = (0, 0, 0)
# Chat command: report the current kill standings (comment, not a docstring,
# to avoid altering any generated command help text)
@command()
def score(connection):
    return connection.protocol.get_kill_count()
def apply_script(protocol, connection, config):
    """Build the TDM-flavored protocol/connection subclasses for piqueserver."""
    class TDMConnection(connection):
        def on_spawn(self, pos):
            # Remind the player of the mode and the current standings
            self.send_chat(self.explain_game_mode())
            self.send_chat(self.protocol.get_kill_count())
            return connection.on_spawn(self, pos)
        def on_flag_take(self):
            # Deny intel pickup entirely when intel is removed from the game
            if REMOVE_INTEL.get():
                return False
            return connection.on_flag_take(self)
        def on_flag_capture(self):
            result = connection.on_flag_capture(self)
            # An intel capture is worth a configurable number of kills
            self.team.kills += INTEL_POINTS.get()
            self.protocol.METHOD_NAME(self)
            return result
        def on_kill(self, killer, type, grenade):
            result = connection.on_kill(self, killer, type, grenade)
            self.protocol.METHOD_NAME(killer)
            return result
        def explain_game_mode(self):
            msg = 'Team Deathmatch: Kill the opposing team.'
            if not REMOVE_INTEL.get():
                msg += ' Intel is worth %s kills.' % INTEL_POINTS.get()
            return msg
    class TDMProtocol(protocol):
        game_mode = CTF_MODE
        def on_flag_spawn(self, x, y, z, flag, entity_id):
            # With intel removed, spawn it at the hidden coordinate
            if REMOVE_INTEL.get():
                return HIDE_COORD
            return protocol.on_flag_spawn(self, x, y, z, flag, entity_id)
        def get_kill_count(self):
            """Return a human-readable standings string for chat."""
            green_kills = self.green_team.kills
            blue_kills = self.blue_team.kills
            diff = green_kills - blue_kills
            if green_kills > blue_kills:
                return ("%s leads %s-%s (+%s, %s left). Playing to %s kills." %
                        (self.green_team.name,
                         green_kills, blue_kills,
                         diff,
                         KILL_LIMIT.get() - green_kills,
                         KILL_LIMIT.get()))
            elif green_kills < blue_kills:
                return ("%s leads %s-%s (+%s, %s left). Playing to %s kills." %
                        (self.blue_team.name,
                         blue_kills, green_kills,
                         -diff,
                         KILL_LIMIT.get() - blue_kills,
                         KILL_LIMIT.get()))
            else:
                return ("%s-%s, %s left. Playing to %s kills." %
                        (green_kills,
                         blue_kills,
                         KILL_LIMIT.get() - green_kills,
                         KILL_LIMIT.get()))
        # since its a team based game, we gonna split the caps
        # for all players in the team
        def do_captures(self, team, caps):
            # NOTE(review): if team.get_players() is empty, team.score never
            # advances and this loop spins forever — confirm callers guarantee
            # at least one player before relying on this.
            while (team.score < caps):
                for player in team.get_players():
                    if team.score >= caps:
                        break
                    team.score += 1
                    intel_capture = IntelCapture()
                    intel_capture.player_id = player.player_id
                    intel_capture.winning = False
                    self.broadcast_contained(intel_capture)
        def METHOD_NAME(self, player):
            """Check whether either team reached the kill limit; end the game if so."""
            # Optionally mirror kill progress onto the intel score as a percentage
            if SCORE_PERCENTAGE.get() and player:
                team = player.team
                caps_percent = math.floor(
                    self.max_score*team.kills/KILL_LIMIT.get())
                if caps_percent > team.score:
                    self.do_captures(team, caps_percent)
            if self.green_team.kills >= KILL_LIMIT.get():
                self.broadcast_chat("%s Team Wins, %s - %s" %
                                    (self.green_team.name,
                                     self.green_team.kills,
                                     self.blue_team.kills))
                self.reset_game(player)
                protocol.on_game_end(self)
            elif self.blue_team.kills >= KILL_LIMIT.get():
                self.broadcast_chat("%s Team Wins, %s - %s" %
                                    (self.blue_team.name,
                                     self.blue_team.kills,
                                     self.green_team.kills))
                self.reset_game(player)
                protocol.on_game_end(self)
    return TDMProtocol, TDMConnection
298,569 | get cov | import math
from typing import Optional
import torch
from torch import Tensor
from .kernel import Kernel
def _fmax(r: Tensor, j: int, q: int) -> Tensor:
return torch.max(torch.tensor(0.0, dtype=r.dtype, device=r.device), 1 - r).pow(j + q)
def METHOD_NAME(r: Tensor, j: int, q: int) -> Tensor:
    """Polynomial factor of the piecewise polynomial (Wendland) kernel.

    Evaluates, elementwise, the degree-q polynomial in ``r`` from
    Rasmussen & Williams (2006), Eq. 4.21. For ``q == 0`` the polynomial is
    the constant 1 (returned as a plain int; it is only used as a multiplier).

    :param r: scaled pairwise distances
    :param j: smoothness exponent, floor(D / 2) + q + 1
    :param q: smoothness parameter, one of {0, 1, 2, 3} (validated by the kernel)
    """
    if q == 0:
        return 1
    if q == 1:
        return (j + 1) * r + 1
    if q == 2:
        # Fixed: the r^2 coefficient is (j^2 + 4j + 3) / 3, not (j + 4j + 3) / 3
        # (cf. Rasmussen & Williams Eq. 4.21 and the class docstring below).
        return 1 + (j + 2) * r + ((j**2 + 4 * j + 3) / 3.0) * (r**2)
    if q == 3:
        return (
            1
            + (j + 3) * r
            + ((6 * j**2 + 36 * j + 45) / 15.0) * r.square()
            + ((j**3 + 9 * j**2 + 23 * j + 15) / 15.0) * (r**3)
        )
class PiecewisePolynomialKernel(Kernel):
    r"""
    Computes a covariance matrix based on the Piecewise Polynomial kernel
    between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`:
    .. math::
        \begin{align}
            r &= \left\Vert x1 - x2 \right\Vert \\
            j &= \lfloor \frac{D}{2} \rfloor + q +1 \\
            K_{\text{ppD, 0}}(\mathbf{x_1}, \mathbf{x_2}) &= (1-r)^j_+ , \\
            K_{\text{ppD, 1}}(\mathbf{x_1}, \mathbf{x_2}) &= (1-r)^{j+1}_+ ((j + 1)r + 1), \\
            K_{\text{ppD, 2}}(\mathbf{x_1}, \mathbf{x_2}) &= (1-r)^{j+2}_+ (1 + (j+2)r +
                \frac{j^2 + 4j + 3}{3}r^2), \\
            K_{\text{ppD, 3}}(\mathbf{x_1}, \mathbf{x_2}) &= (1-r)^{j+3}_+
                (1 + (j+3)r + \frac{6j^2 + 36j + 45}{15}r^2 +
                \frac{j^3 + 9j^2 + 23j +15}{15}r^3) \\
        \end{align}
    where :math:`K_{\text{ppD, q}}` is positive semidefinite in :math:`\mathbb{R}^{D}` and
    :math:`q` is the smoothness coefficient. See `Rasmussen and Williams (2006)`_ Equation 4.21.
    .. note:: This kernel does not have an `outputscale` parameter. To add a scaling parameter,
        decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.
    :param int q: (default= 2) The smoothness parameter.
    :type q: int (0, 1, 2 or 3)
    :param ard_num_dims: (Default: `None`) Set this if you want a separate lengthscale for each
        input dimension. It should be `d` if x1 is a `... x n x d` matrix.
    :type ard_num_dims: int, optional
    :param batch_shape: (Default: `None`) Set this if you want a separate lengthscale for each
        batch of input data. It should be `torch.Size([b1, b2])` for a `b1 x b2 x n x m` kernel output.
    :type batch_shape: torch.Size, optional
    :param active_dims: (Default: `None`) Set this if you want to
        compute the covariance of only a few input dimensions. The ints
        corresponds to the indices of the dimensions.
    :type active_dims: Tuple(int)
    :param lengthscale_prior: (Default: `None`)
        Set this if you want to apply a prior to the lengthscale parameter.
    :type lengthscale_prior: ~gpytorch.priors.Prior, optional
    :param lengthscale_constraint: (Default: `Positive`) Set this if you want
        to apply a constraint to the lengthscale parameter.
    :type lengthscale_constraint: ~gpytorch.constraints.Positive, optional
    :param eps: (Default: 1e-6) The minimum value that the lengthscale can take (prevents divide by zero errors).
    :type eps: float, optional
    .. _Rasmussen and Williams (2006):
        http://www.gaussianprocess.org/gpml/
    Example:
        >>> x = torch.randn(10, 5)
        >>> # Non-batch option
        >>> covar_module = gpytorch.kernels.ScaleKernel(
                gpytorch.kernels.PiecewisePolynomialKernel(q = 2))
        >>> # Non-batch: ARD (different lengthscale for each input dimension)
        >>> covar_module = gpytorch.kernels.ScaleKernel(
                gpytorch.kernels.PiecewisePolynomialKernel(q = 2, ard_num_dims=5)
            )
        >>> covar = covar_module(x)  # Output: LinearOperator of size (10 x 10)
        >>> batch_x = torch.randn(2, 10, 5)
        >>> # Batch: different lengthscale for each batch
        >>> covar_module = gpytorch.kernels.ScaleKernel(
                gpytorch.kernels.PiecewisePolynomialKernel(q = 2, batch_shape=torch.Size([2]))
            )
        >>> covar = covar_module(batch_x)  # Output: LinearOperator of size (2 x 10 x 10)
    """
    has_lengthscale = True
    def __init__(self, q: Optional[int] = 2, **kwargs):
        super(PiecewisePolynomialKernel, self).__init__(**kwargs)
        # Only smoothness parameters 0..3 have closed forms (Eq. 4.21)
        if q not in {0, 1, 2, 3}:
            raise ValueError("q expected to be 0, 1, 2 or 3")
        self.q = q
    def forward(self, x1: Tensor, x2: Tensor, last_dim_is_batch: bool = False, diag: bool = False, **params) -> Tensor:
        x1_ = x1.div(self.lengthscale)
        x2_ = x2.div(self.lengthscale)
        if last_dim_is_batch is True:
            # NOTE(review): with the last dim treated as batch, the data
            # dimensionality is taken from axis 1 — confirm this matches the
            # expected input layout for last_dim_is_batch.
            D = x1.shape[1]
        else:
            D = x1.shape[-1]
        # Smoothness exponent from Eq. 4.21: j = floor(D / 2) + q + 1
        j = math.floor(D / 2.0) + self.q + 1
        if last_dim_is_batch and diag:
            r = self.covar_dist(x1_, x2_, last_dim_is_batch=True, diag=True)
        elif diag:
            r = self.covar_dist(x1_, x2_, diag=True)
        elif last_dim_is_batch:
            r = self.covar_dist(x1_, x2_, last_dim_is_batch=True)
        else:
            r = self.covar_dist(x1_, x2_)
        # Kernel = compactly-supported positive part * polynomial factor
        cov_matrix = _fmax(r, j, self.q) * METHOD_NAME(r, j, self.q)
        return cov_matrix
298,570 | set config file path | # @file
# Firmware Module Management Tool.
#
# Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
import argparse
from core.FMMTOperation import *
# Command line surface of the FMMT tool.  The parser is built at import
# time so that main() can simply call parser.parse_args().
parser = argparse.ArgumentParser(description='''
View the Binary Structure of FD/FV/Ffs/Section, and Delete/Extract/Add/Replace a Ffs from/into a FV.
''')
# NOTE(review): this help text says "Print debug information" but the action
# prints the program version - the string looks copy-pasted; confirm intent.
parser.add_argument("--version", action="version", version='%(prog)s Version 1.0',
                    help="Print debug information.")
# -v: dump the tree structure of an image.
parser.add_argument("-v", "--View", dest="View", nargs='+',
                    help="View each FV and the named files within each FV: '-v inputfile outputfile, inputfiletype(.Fd/.Fv/.ffs/.sec)'")
# -d: remove a Ffs from a FV.
parser.add_argument("-d", "--Delete", dest="Delete", nargs='+',
                    help="Delete a Ffs from FV: '-d inputfile TargetFvName(Optional) TargetFfsName outputfile\
If not given TargetFvName, all the existed target Ffs will be deleted'")
# -e: pull a Ffs (or a whole FV) out of an image.
parser.add_argument("-e", "--Extract", dest="Extract", nargs='+',
                    help="Extract a Ffs Info: '-e inputfile TargetFvName(Optional) TargetFfsName outputfile\
If not given TargetFvName, the first found target Ffs will be extracted.\
If only given TargetFvName, not given TargetFfsName, the TargetFv will be extracted to output file'")
# -a: insert a new Ffs into a FV.
parser.add_argument("-a", "--Add", dest="Add", nargs='+',
                    help="Add a Ffs into a FV:'-a inputfile TargetFvName newffsfile outputfile'")
# -r: swap an existing Ffs for a new one.
parser.add_argument("-r", "--Replace", dest="Replace", nargs='+',
                    help="Replace a Ffs in a FV: '-r inputfile TargetFvName(Optional) TargetFfsName newffsfile outputfile\
If not given TargetFvName, all the existed target Ffs will be replaced with new Ffs file)'")
# -l: where to write the layout dump produced together with -v.
parser.add_argument("-l", "--LayoutFileName", dest="LayoutFileName", nargs='+',
                    help="The output file which saves Binary layout: '-l xxx.txt'/'-l xxx.json'\
If only provide file format as 'txt', \
the file will be generated with default name (Layout_'InputFileName'.txt). \
Currently supports two formats: json, txt. More formats will be added in the future")
# -c: explicit FmmtConf.ini location, overriding the default search order.
parser.add_argument("-c", "--ConfigFilePath", dest="ConfigFilePath", nargs='+',
                    help="Provide the target FmmtConf.ini file path: '-c C:\Code\FmmtConf.ini' \
FmmtConf file saves the target guidtool used in compress/uncompress process.\
If do not provide, FMMT tool will search the inputfile folder for FmmtConf.ini firstly, if not found,\
the FmmtConf.ini saved in FMMT tool's folder will be used as default.")
# -s: compact a FV by dropping free space.
parser.add_argument("-s", "--ShrinkFv", dest="ShrinkFv", nargs='+',
                    help="Shrink the Fv file: '-s InputFvfile OutputFvfile")
def print_banner():
    """Emit the (currently empty) banner line shown before tool output."""
    print("")
class FMMT():
    """Facade over the core FMMT operations (view, delete, extract, add,
    replace and shrink) on firmware binaries.

    Every public entry point first calls SetDestPath() so that guid tools
    sitting next to the input file win over the ones on the default PATH.
    """
    def __init__(self) -> None:
        # Cache for parsed firmware images; currently unused.
        self.firmware_packet = {}

    def METHOD_NAME(self, configfilepath: str) -> None:
        """Publish the FmmtConf.ini path via the FmmtConfPath env variable.

        (Return annotation fixed: this method returns None, not str.)
        """
        os.environ['FmmtConfPath'] = os.path.abspath(configfilepath)

    def SetDestPath(self, inputfile: str) -> None:
        """Clear any configured conf path and prepend the input file's
        directory to PATH so sibling guid tools are found first."""
        os.environ['FmmtConfPath'] = ''
        self.dest_path = os.path.dirname(os.path.abspath(inputfile))
        old_env = os.environ['PATH']
        os.environ['PATH'] = self.dest_path + os.pathsep + old_env

    def CheckFfsName(self, FfsName: str):
        """Return FfsName parsed as a uuid.UUID when it is a GUID string,
        otherwise the raw name unchanged."""
        try:
            return uuid.UUID(FfsName)
        except ValueError:
            # Narrowed from a bare except: uuid.UUID raises ValueError for
            # non-GUID strings; treat those as human-readable Ffs names.
            return FfsName

    def GetFvName(self, FvName: str):
        """Same GUID-or-name normalisation as CheckFfsName, for FV names."""
        try:
            return uuid.UUID(FvName)
        except ValueError:
            return FvName

    def View(self, inputfile: str, layoutfilename: str = None, outputfile: str = None) -> None:
        """View the structure of a firmware binary; the tree root type is
        inferred from the file extension (unknown extensions fall back to
        a full FD tree)."""
        # ViewFile(inputfile, ROOT_TYPE, logfile, outputfile)
        self.SetDestPath(inputfile)
        filetype = os.path.splitext(inputfile)[1].lower()
        # Extension -> root tree type mapping replaces the if/elif ladder.
        root_types = {
            '.fd': ROOT_TREE,
            '.fv': ROOT_FV_TREE,
            '.ffs': ROOT_FFS_TREE,
            '.sec': ROOT_SECTION_TREE,
            '.elf': ROOT_ELF_TREE,
        }
        ROOT_TYPE = root_types.get(filetype, ROOT_TREE)
        ViewFile(inputfile, ROOT_TYPE, layoutfilename, outputfile)

    def Delete(self, inputfile: str, TargetFfs_name: str, outputfile: str, Fv_name: str = None) -> None:
        """Delete a Ffs (optionally scoped to one FV) and write the result."""
        self.SetDestPath(inputfile)
        if Fv_name:
            DeleteFfs(inputfile, self.CheckFfsName(TargetFfs_name), outputfile, self.GetFvName(Fv_name))
        else:
            DeleteFfs(inputfile, self.CheckFfsName(TargetFfs_name), outputfile)

    def Extract(self, inputfile: str, Ffs_name: str, outputfile: str, Fv_name: str = None) -> None:
        """Extract a Ffs (optionally scoped to one FV) into outputfile."""
        self.SetDestPath(inputfile)
        if Fv_name:
            ExtractFfs(inputfile, self.CheckFfsName(Ffs_name), outputfile, self.GetFvName(Fv_name))
        else:
            ExtractFfs(inputfile, self.CheckFfsName(Ffs_name), outputfile)

    def Add(self, inputfile: str, Fv_name: str, newffsfile: str, outputfile: str) -> None:
        """Insert newffsfile into the FV identified by Fv_name."""
        self.SetDestPath(inputfile)
        AddNewFfs(inputfile, self.CheckFfsName(Fv_name), newffsfile, outputfile)

    def Replace(self, inputfile: str, Ffs_name: str, newffsfile: str, outputfile: str, Fv_name: str = None) -> None:
        """Replace an existing Ffs with newffsfile (optionally scoped to a FV)."""
        self.SetDestPath(inputfile)
        if Fv_name:
            ReplaceFfs(inputfile, self.CheckFfsName(Ffs_name), newffsfile, outputfile, self.GetFvName(Fv_name))
        else:
            ReplaceFfs(inputfile, self.CheckFfsName(Ffs_name), newffsfile, outputfile)

    def Shrink(self, inputfile: str, outputfile: str) -> None:
        """Strip free space from a FV file."""
        self.SetDestPath(inputfile)
        ShrinkFv(inputfile, outputfile)
def main():
    """Parse CLI arguments and dispatch to the matching FMMT operation.

    Returns the process exit status: 0 on success, 1 when the requested
    operation raised.  (Bug fix: the original always returned 0, so
    failures were invisible to callers checking the exit code.)
    """
    args = parser.parse_args()
    status = 0
    try:
        fmmt = FMMT()
        if args.ConfigFilePath:
            fmmt.METHOD_NAME(args.ConfigFilePath[0])
        if args.View:
            if args.LayoutFileName:
                fmmt.View(args.View[0], args.LayoutFileName[0])
            else:
                fmmt.View(args.View[0])
        elif args.Delete:
            # With 4 args the second one is the optional TargetFvName.
            if len(args.Delete) == 4:
                fmmt.Delete(args.Delete[0], args.Delete[2], args.Delete[3], args.Delete[1])
            else:
                fmmt.Delete(args.Delete[0], args.Delete[1], args.Delete[2])
        elif args.Extract:
            if len(args.Extract) == 4:
                fmmt.Extract(args.Extract[0], args.Extract[2], args.Extract[3], args.Extract[1])
            else:
                fmmt.Extract(args.Extract[0], args.Extract[1], args.Extract[2])
        elif args.Add:
            fmmt.Add(args.Add[0], args.Add[1], args.Add[2], args.Add[3])
        elif args.Replace:
            if len(args.Replace) == 5:
                fmmt.Replace(args.Replace[0], args.Replace[2], args.Replace[3], args.Replace[4], args.Replace[1])
            else:
                fmmt.Replace(args.Replace[0], args.Replace[1], args.Replace[2], args.Replace[3])
        elif args.ShrinkFv:
            fmmt.Shrink(args.ShrinkFv[0], args.ShrinkFv[1])
        else:
            parser.print_help()
    except Exception as e:
        print(e)
        status = 1  # report the failure through the exit code
    return status
if __name__ == "__main__":
exit(main()) |
298,571 | test network l | import unittest
import os
import shutil
from tempfile import mkdtemp
from subprocess import Popen, PIPE
class SepolicyTests(unittest.TestCase):
def assertDenied(self, err):
self.assert_('Permission denied' in err,
'"Permission denied" not found in %r' % err)
def assertNotFound(self, err):
self.assert_('not found' in err,
'"not found" not found in %r' % err)
def assertFailure(self, status):
self.assertNotEqual(status, 0,
'Succeeded when it should have failed')
def assertSuccess(self, status, err):
self.assertEqual(status, 0,
'sepolicy should have succeeded for this test %r' % err)
def test_man_domain(self):
"Verify sepolicy manpage -d works"
p = Popen(['sepolicy', 'manpage', '-d', 'httpd_t'], stdout=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def test_man_all(self):
"Verify sepolicy manpage -a works"
p = Popen(['sepolicy', 'manpage', '-a'], stdout=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def METHOD_NAME(self):
"Verify sepolicy network -l works"
p = Popen(['sepolicy', 'network', '-l'], stdout=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def test_network_t(self):
"Verify sepolicy network -t works"
p = Popen(['sepolicy', 'network', '-t', 'http_port_t'], stdout=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def test_network_p(self):
"Verify sepolicy network -p works"
p = Popen(['sepolicy', 'network', '-p', '80'], stdout=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def test_network_d(self):
"Verify sepolicy network -d works"
p = Popen(['sepolicy', 'network', '-d', 'httpd_t'], stdout=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def test_transition_s(self):
"Verify sepolicy transition -s works"
p = Popen(['sepolicy', 'transition', '-s', 'httpd_t'], stdout=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def test_transition_t(self):
"Verify sepolicy transition -t works"
p = Popen(['sepolicy', 'transition', '-s', 'httpd_t', '-t', 'sendmail_t'], stdout=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def test_booleans_a(self):
"Verify sepolicy booleans -a works"
p = Popen(['sepolicy', 'booleans', '-a'], stdout=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def test_booleans_b_alias(self):
"Verify sepolicy booleans -b works"
p = Popen(['sepolicy', 'booleans', '-b', 'allow_ypbind'], stdout=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def test_booleans_b(self):
"Verify sepolicy booleans -b works"
p = Popen(['sepolicy', 'booleans', '-b', 'nis_enabled'], stdout=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def test_interface_l(self):
"Verify sepolicy interface -l works"
p = Popen(['sepolicy', 'interface', '-l'], stdout=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def test_interface_a(self):
"Verify sepolicy interface -a works"
p = Popen(['sepolicy', 'interface', '-a'], stdout=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def test_interface_p(self):
"Verify sepolicy interface -u works"
p = Popen(['sepolicy', 'interface', '-u'], stdout=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def test_interface_ci(self):
"Verify sepolicy interface -c -i works"
p = Popen(['sepolicy', 'interface', '-c', '-i', 'apache_admin'], stdout=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
if __name__ == "__main__":
import selinux
if selinux.is_selinux_enabled() and selinux.security_getenforce() == 1:
unittest.main()
else:
print("SELinux must be in enforcing mode for this test") |
298,572 | hf private dataset repo zipped img data | import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# Credentials and endpoints for the Hugging Face CI Hub (staging) instance.
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
# NOTE: publicly-known token for the throwaway CI hub only; not a secret.
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
# Fix: the final placeholder was garbled to "(unknown)" in the checked-in
# copy; huggingface_hub's HUGGINGFACE_CO_URL_TEMPLATE expects "{filename}".
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    """Point huggingface_hub's file-download URL template at the CI endpoint."""
    monkeypatch.setattr("huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE)
@pytest.fixture
def ci_hub_config(monkeypatch):
    """Redirect the datasets library endpoints to the CI hub."""
    for attr, value in (
        ("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT),
        ("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL),
    ):
        monkeypatch.setattr(attr, value)
@pytest.fixture
def ci_hub_token_path(monkeypatch):
    """Make HfFolder read/write its token under the CI token path."""
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    """Install the CI user token for the test's duration, then remove it."""
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
    """Session-wide HfApi client bound to the CI hub endpoint."""
    return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
    """Session-wide access token for the dummy CI user."""
    yield CI_HUB_USER_TOKEN
@pytest.fixture
def cleanup_repo(hf_api):
    """Return a callable that deletes a dataset repo on the CI hub."""
    def _delete(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _delete
@pytest.fixture
def temporary_repo(cleanup_repo):
    """Factory for a context manager that yields a repo id and always
    deletes the repo afterwards, even if the body raises."""
    @contextmanager
    def _scoped_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _scoped_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    """Create a private dataset repo holding one text file; best-effort
    deletion when the session ends."""
    repo_id = "{}/repo_txt_data-{}".format(CI_HUB_USER, int(time.time() * 10e3))
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):
        # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    """Private text-data repo with the CI endpoints patched in."""
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    """Create a private dataset repo holding a zipped csv directory;
    best-effort deletion when the session ends."""
    repo_id = "{}/repo_zipped_txt_data-{}".format(CI_HUB_USER, int(time.time() * 10e3))
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):
        # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    """Private zipped-text repo with the CI endpoints patched in."""
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def METHOD_NAME(hf_api: HfApi, hf_token, zip_image_path):
    """Create a private dataset repo holding a zipped image archive;
    best-effort deletion when the session ends."""
    repo_id = "{}/repo_zipped_img_data-{}".format(CI_HUB_USER, int(time.time() * 10e3))
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):
        # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(METHOD_NAME, ci_hub_config, ci_hfh_hf_hub_url):
    """Private zipped-image repo with the CI endpoints patched in."""
    return METHOD_NAME
298,573 | scrub meta | import logging
from copy import copy
from jsonobject.api import re_date
from corehq.util.metrics import metrics_counter
from dimagi.utils.parsing import json_format_datetime
from corehq.util.dates import iso_string_to_datetime
def METHOD_NAME(xform):
    """Normalise legacy metadata on *xform*'s form, counting occurrences
    of old-format metadata per domain."""
    if not hasattr(xform, 'form'):
        return
    if scrub_form_meta(xform.form_id, xform.form):
        metrics_counter('commcare.xform_submissions.old_metadata', tags={
            'domain': xform.domain,
        })
def scrub_form_meta(form_id, form_data):
    """
    Cleans up old format metadata to our current standard.

    Renames a legacy 'Meta' block to 'meta' and maps old property names
    (TimeStart, TimeEnd, chw_id, DeviceID, uid) onto the current ones.

    Does NOT save the doc, but returns whether the doc needs to be saved.
    """
    property_map = {'TimeStart': 'timeStart',
                    'TimeEnd': 'timeEnd',
                    'chw_id': 'userID',
                    'DeviceID': 'deviceID',
                    'uid': 'instanceID'}
    # hack to make sure uppercase meta still ends up in the right place
    found_old = False
    if 'Meta' in form_data:
        form_data['meta'] = form_data['Meta']
        del form_data['Meta']
        found_old = True
    if 'meta' in form_data:
        meta_block = form_data['meta']
        # scrub values from 0.9 to 1.0
        if isinstance(meta_block, list):
            if isinstance(meta_block[0], dict):
                # if it's a list of dictionaries, arbitrarily pick the first one
                # this is a pretty serious error, but it's also recoverable
                form_data['meta'] = meta_block = meta_block[0]
                logging.error((
                    'form %s contains multiple meta blocks. '
                    'this is not correct but we picked one abitrarily'
                ) % form_id)
            else:
                # if it's a list of something other than dictionaries,
                # don't bother scrubbing.
                # BUG FIX: the original call never supplied form_id, so the
                # message logged a literal '%s' (and ran two sentences
                # together); pass form_id lazily via the logging API.
                logging.error('form %s contains a poorly structured meta block. '
                              'this might cause data display problems.', form_id)
        if isinstance(meta_block, dict):
            for key in list(meta_block):
                # Only migrate a legacy key when the new key isn't already set.
                if key in property_map and property_map[key] not in meta_block:
                    meta_block[property_map[key]] = meta_block[key]
                    del meta_block[key]
                    found_old = True
    return found_old
def clean_metadata(_meta_block):
    """Return a cleaned shallow copy of a form's meta block.

    Drops '@'-prefixed attributes, normalises appVersion/location to plain
    text, parses timeStart/timeEnd, and flattens any remaining dict values.
    """
    # dict() already produces a fresh shallow copy; the original's extra
    # copy() wrapper was redundant.
    meta_block = dict(_meta_block)
    if not meta_block:
        return meta_block
    meta_block = _remove_unused_meta_attributes(meta_block)
    meta_block['appVersion'] = _get_text_attribute(meta_block.get('appVersion'))
    meta_block['location'] = _get_text_attribute(meta_block.get('location'))
    meta_block = _parse_meta_times(meta_block)
    # also clean dicts on the return value, since those are not allowed
    for key in meta_block:
        if isinstance(meta_block[key], dict):
            meta_block[key] = _flatten_dict(meta_block[key])
    return meta_block
def _flatten_dict(dictionary):
return ", ".join("{}:{}".format(k, v) for k, v in dictionary.items())
def _remove_unused_meta_attributes(meta_block):
for key in list(meta_block.keys()):
# remove attributes from the meta block
if key.startswith('@'):
del meta_block[key]
return meta_block
def _parse_meta_times(meta_block):
    """Normalise timeStart/timeEnd into canonical JSON datetime strings.

    Bare dates get a midnight-UTC time appended; empty values and values
    that still fail to parse are dropped. Mutates and returns meta_block.
    """
    for field in ("timeStart", "timeEnd"):
        if field not in meta_block:
            continue
        raw = meta_block[field]
        if not raw:
            # it was empty, also a failure
            del meta_block[field]
            continue
        if re_date.match(raw):
            # this kind of leniency is pretty bad and making it midnight in UTC
            # is totally arbitrary here for backwards compatibility
            raw += 'T00:00:00.000000Z'
            meta_block[field] = raw
        try:
            # try to parse to ensure correctness
            parsed = iso_string_to_datetime(raw)
            # and set back in the right format in case it was a date, not a datetime
            meta_block[field] = json_format_datetime(parsed)
        except Exception:
            logging.exception('Could not parse meta_block')
            # we couldn't parse it
            del meta_block[field]
    return meta_block
def _get_text_attribute(node):
if node is None:
return None
if isinstance(node, dict) and '#text' in node:
value = node['#text']
elif isinstance(node, dict) and all(a.startswith('@') for a in node):
return None
else:
value = node
if isinstance(value, bytes):
value = value.decode('utf-8')
elif not isinstance(value, str):
value = str(value)
return value |
298,574 | test repr | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import json
from datetime import datetime
import pyowm.commons.exceptions
from pyowm.weatherapi25.location import Location
from pyowm.airpollutionapi30.coindex import COIndex
from pyowm.utils.formatting import datetime_to_UNIXtime
COINDEX_JSON = '{"time":"2016-10-01T13:07:01Z","location":{"latitude":0,"longitude":9.2359},"data":[{"precision":-4.999999987376214e-07,"pressure":1000,"value":8.609262636127823e-08},{ "precision":-4.999999987376214e-07,"pressure":681.2920532226562,"value":1.1352169337897067e-07},{ "precision":-4.999999987376214e-07,"pressure":464.15887451171875,"value":1.1864428017815953e-07}]}'
COINDEX_MALFORMED_JSON = '{"time":"2016-10-01T13:07:01Z","xyz":[]}'
COINDEX_JSON_DUMP = '{"reference_time": 1234567, "co_samples": [{"pressure": ' \
'1000, "value": 8.168363052618588e-08, "precision": ' \
'-4.999999987376214e-07}, {"pressure": 681.2920532226562, ' \
'"value": 8.686949115599418e-08, "precision": ' \
'-4.999999987376214e-07}, {"pressure": 464.15887451171875, ' \
'"value": 8.871462853221601e-08, "precision": ' \
'-4.999999987376214e-07}], "location": {"country": "UK", ' \
'"name": "test", "coordinates": {"lat": 43.7, "lon": 12.3}, ' \
'"ID": 987}, "interval": "day", "reception_time": 1475283600}'
class TestCOIndex(unittest.TestCase):
    """Unit tests for the COIndex air-pollution model.

    Modernised to ``assertEqual`` (the ``assertEquals`` alias was
    deprecated since Python 3.2 and removed in 3.12).
    """

    __test_reception_time = 1475283600
    __test_iso_reception_time = "2016-10-01 01:00:00+00:00"
    __test_date_reception_time = datetime.fromisoformat(__test_iso_reception_time)
    __test_reference_time = 1234567
    __test_iso_reference_time = "1970-01-15 06:56:07+00:00"
    __test_date_reference_time = datetime.fromisoformat(__test_iso_reference_time)
    __test_location = Location('test', 12.3, 43.7, 987, 'UK')
    __test_co_samples = [
        {
            "precision": -4.999999987376214e-7,
            "pressure": 1000,
            "value": 8.168363052618588e-8
        },
        {
            "precision": -4.999999987376214e-7,
            "pressure": 681.2920532226562,
            "value": 8.686949115599418e-8
        },
        {
            "precision": -4.999999987376214e-7,
            "pressure": 464.15887451171875,
            "value": 8.871462853221601e-8
        }
    ]
    __test_interval = 'day'
    __test_instance = COIndex(
        __test_reference_time, __test_location, __test_interval,
        __test_co_samples, __test_reception_time)

    def test_init_fails_when_reference_time_is_negative(self):
        self.assertRaises(ValueError, COIndex, -1234567,
                          self.__test_location,
                          self.__test_interval,
                          self.__test_co_samples,
                          self.__test_reception_time)

    def test_init_fails_when_reception_time_is_negative(self):
        self.assertRaises(ValueError, COIndex,
                          self.__test_reference_time,
                          self.__test_location,
                          self.__test_interval,
                          self.__test_co_samples,
                          -1234567)

    def test_init_fails_when_co_samples_is_not_a_list(self):
        self.assertRaises(ValueError, COIndex, self.__test_reference_time,
                          self.__test_location, self.__test_interval, 'test',
                          self.__test_reception_time)

    def test_returning_different_formats_for_reference_time(self):
        self.assertEqual(self.__test_instance.reference_time(timeformat='iso'),
                         self.__test_iso_reference_time)
        self.assertEqual(self.__test_instance.reference_time(timeformat='unix'),
                         self.__test_reference_time)
        self.assertEqual(self.__test_instance.reference_time(timeformat='date'),
                         self.__test_date_reference_time)

    def test_returning_different_formats_for_reception_time(self):
        self.assertEqual(self.__test_instance.reception_time(timeformat='iso'),
                         self.__test_iso_reception_time)
        self.assertEqual(self.__test_instance.reception_time(timeformat='unix'),
                         self.__test_reception_time)
        self.assertEqual(self.__test_instance.reception_time(timeformat='date'),
                         self.__test_date_reception_time)

    def test_is_forecast(self):
        self.assertFalse(self.__test_instance.is_forecast())
        in_a_year = datetime_to_UNIXtime(datetime.utcnow()) + 31536000
        uvindex = COIndex(in_a_year,
                          self.__test_location, self.__test_interval,
                          [], self.__test_reception_time)
        self.assertTrue(uvindex.is_forecast())

    def test_co_sample_with_highest_vmr(self):
        expected = {
            "precision": -4.999999987376214e-7,
            "pressure": 464.15887451171875,
            "value": 8.871462853221601e-8
        }
        result = self.__test_instance.sample_with_highest_vmr()
        self.assertEqual(expected, result)

    def test_co_sample_with_lowest_vmr(self):
        expected = {
            "precision": -4.999999987376214e-7,
            "pressure": 1000,
            "value": 8.168363052618588e-8
        }
        result = self.__test_instance.sample_with_lowest_vmr()
        self.assertEqual(expected, result)

    def test_from_dict(self):
        d = json.loads(COINDEX_JSON)
        result = COIndex.from_dict(d)
        self.assertIsNotNone(result)
        self.assertIsNotNone(result.reference_time())
        # BUG FIX: the original asserted reference_time() twice; the second
        # check was plainly meant for the reception timestamp.
        self.assertIsNotNone(result.reception_time())
        loc = result.location
        self.assertIsNotNone(loc)
        self.assertIsNone(loc.name)
        self.assertIsNone(loc.id)
        self.assertIsNotNone(loc.lon)
        self.assertIsNotNone(loc.lat)
        self.assertIsNone(result.interval)
        self.assertNotEqual(0, len(result.co_samples))

    def test_from_dict_fails_when_JSON_data_is_None(self):
        self.assertRaises(pyowm.commons.exceptions.ParseAPIResponseError, COIndex.from_dict, None)

    def test_from_dict_fails_with_malformed_JSON_data(self):
        self.assertRaises(pyowm.commons.exceptions.ParseAPIResponseError, COIndex.from_dict, json.loads(COINDEX_MALFORMED_JSON))

    def test_to_dict(self):
        expected = json.loads(COINDEX_JSON_DUMP)
        result = self.__test_instance.to_dict()
        ordered_str_expected = sorted(str(expected))
        ordered_str_result = sorted(str(result))
        self.assertEqual(ordered_str_expected, ordered_str_result)

    def METHOD_NAME(self):
        # Smoke-checks that __repr__/__str__ do not raise.
        print(self.__test_instance)
298,575 | csv dict reader | import re
from datetime import datetime, date
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from scripts.management.csv_command import CsvCommand
from bims.models import SourceReference, BiologicalCollectionRecord, ChemicalRecord
from bims.utils.user import create_users_from_string
from geonode.documents.models import Document
TAXON_GROUP = 'Taxon Group'
TAXON = 'Taxon'
class Command(CsvCommand):
    # Management command: reconcile Biobase study references against the
    # reference category recorded in a hand-maintained CSV, converting
    # mismatched SourceReferences into document-backed references.

    def csv_file_name(self, options):
        # Return name of the csv file
        return 'Biobase.Study.Reference.Table.For.DOI.URL.-.06-03-20.csv'

    def METHOD_NAME(self, csv_reader):
        # Process each CSV row.  NOTE(review): the checked-in copy lost its
        # indentation; the nesting below is the reading that keeps every
        # referenced variable defined - confirm against version control.
        for row in csv_reader:
            title = row['Title']
            # Collapse runs of spaces that crept into hand-entered titles.
            fixed_title = re.sub(' +', ' ', title)
            url = row['URL']
            dul = row['Document Upload Link']
            reference_category = row['Reference category']
            # Match either bibliography-backed or document-backed references
            # by (sub)title.
            source_references = SourceReference.objects.filter(
                Q(sourcereferencebibliography__source__title__icontains=title) |
                Q(sourcereferencedocument__source__title__icontains=title)
            )
            if source_references.exists():
                source_reference = source_references[0]
                if reference_category.lower() not in source_reference.reference_type.lower():
                    # Stored type disagrees with the CSV: rebuild the
                    # reference as a document-backed one.
                    print('---Change to document---')
                    if dul:
                        # The upload link ends with the numeric document id.
                        try:
                            doc_split = dul.split('/')
                            document_id = int(doc_split[len(doc_split) - 1])
                            document = Document.objects.get(id=document_id)
                            print('---Create new source reference')
                            new_source_reference = (
                                SourceReference.create_source_reference(
                                    category='document',
                                    source_id=document.id,
                                    note=None
                                )
                            )
                            # Re-point every dependent record, then drop the
                            # superseded reference.
                            print('---Update bio records---')
                            BiologicalCollectionRecord.objects.filter(
                                source_reference=source_reference
                            ).update(
                                source_reference=new_source_reference
                            )
                            ChemicalRecord.objects.filter(
                                source_reference=source_reference
                            ).update(
                                source_reference=new_source_reference
                            )
                            print('---Delete old source reference---')
                            source_reference.delete()
                        except (ValueError, Document.DoesNotExist):
                            print ('Document {} does not exist'.format(
                                dul))
                    if url:
                        # No uploaded document: create (or reuse) one that
                        # simply points at the external URL.
                        document_fields = {
                            'doc_url': url,
                            'title': fixed_title
                        }
                        if row['Year']:
                            # Only the year is recorded; pin to January 1st.
                            document_fields['date'] = date(
                                year=int(row['Year']),
                                month=1,
                                day=1
                            )
                        authors = create_users_from_string(
                            row['Author(s)'])
                        if len(authors) > 0:
                            author = authors[0]
                        else:
                            author = None
                        document_fields['owner'] = author
                        document, document_created = Document.objects.get_or_create(
                            **document_fields
                        )
                        new_source_reference = (
                            SourceReference.create_source_reference(
                                category='document',
                                source_id=document.id,
                                note=None
                            )
                        )
                        print('---Update bio records---')
                        BiologicalCollectionRecord.objects.filter(
                            source_reference=source_reference
                        ).update(
                            source_reference=new_source_reference
                        )
                        ChemicalRecord.objects.filter(
                            source_reference=source_reference
                        ).update(
                            source_reference=new_source_reference
                        )
                        print('---Delete old source reference---')
                        source_reference.delete()
                    if reference_category.lower() == 'unpublished data':
                        print(fixed_title)
                else:
                    # Category already matches: only normalise the title.
                    if title != fixed_title:
                        print('---Fix title---')
                        print(fixed_title)
                        source_reference.source.title = fixed_title
                        source_reference.source.save()
# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton
# www.bluepines.org
# Copyright (c) 2012 42 Lines Inc., Jim Browne
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import time
from tests.compat import unittest
from nose.plugins.attrib import attr
from boto.route53.connection import Route53Connection
from boto.exception import TooManyRecordsException
from boto.vpc import VPCConnection
@attr(route53=True)
class TestRoute53Zone(unittest.TestCase):
    """Integration tests for boto's Route53 zone and record operations.

    Modernised to ``assertEqual`` (the ``assertEquals`` alias was
    deprecated since Python 3.2 and removed in 3.12) and to the
    conventional ``cls`` name in the class-level hooks.
    """

    @classmethod
    def setUpClass(cls):
        # Fresh throwaway domain per run; remove any leftover zone first.
        route53 = Route53Connection()
        cls.base_domain = 'boto-test-%s.com' % str(int(time.time()))
        zone = route53.get_zone(cls.base_domain)
        if zone is not None:
            zone.delete()
        cls.zone = route53.create_zone(cls.base_domain)

    def test_nameservers(self):
        self.zone.get_nameservers()

    def test_a(self):
        self.zone.add_a(self.base_domain, '102.11.23.1', 80)
        record = self.zone.get_a(self.base_domain)
        self.assertEqual(record.name, u'%s.' % self.base_domain)
        self.assertEqual(record.resource_records, [u'102.11.23.1'])
        self.assertEqual(record.ttl, u'80')
        self.zone.update_a(self.base_domain, '186.143.32.2', '800')
        record = self.zone.get_a(self.base_domain)
        self.assertEqual(record.name, u'%s.' % self.base_domain)
        self.assertEqual(record.resource_records, [u'186.143.32.2'])
        self.assertEqual(record.ttl, u'800')

    def test_cname(self):
        self.zone.add_cname(
            'www.%s' % self.base_domain,
            'webserver.%s' % self.base_domain,
            200
        )
        record = self.zone.get_cname('www.%s' % self.base_domain)
        self.assertEqual(record.name, u'www.%s.' % self.base_domain)
        self.assertEqual(record.resource_records, [
            u'webserver.%s.' % self.base_domain
        ])
        self.assertEqual(record.ttl, u'200')
        self.zone.update_cname(
            'www.%s' % self.base_domain,
            'web.%s' % self.base_domain,
            45
        )
        record = self.zone.get_cname('www.%s' % self.base_domain)
        self.assertEqual(record.name, u'www.%s.' % self.base_domain)
        self.assertEqual(record.resource_records, [
            u'web.%s.' % self.base_domain
        ])
        self.assertEqual(record.ttl, u'45')

    def test_mx(self):
        self.zone.add_mx(
            self.base_domain,
            [
                '10 mx1.%s' % self.base_domain,
                '20 mx2.%s' % self.base_domain,
            ],
            1000
        )
        record = self.zone.get_mx(self.base_domain)
        self.assertEqual(set(record.resource_records),
                         set([u'10 mx1.%s.' % self.base_domain,
                              u'20 mx2.%s.' % self.base_domain]))
        self.assertEqual(record.ttl, u'1000')
        self.zone.update_mx(
            self.base_domain,
            [
                '10 mail1.%s' % self.base_domain,
                '20 mail2.%s' % self.base_domain,
            ],
            50
        )
        record = self.zone.get_mx(self.base_domain)
        self.assertEqual(set(record.resource_records),
                         set([u'10 mail1.%s.' % self.base_domain,
                              '20 mail2.%s.' % self.base_domain]))
        self.assertEqual(record.ttl, u'50')

    def test_get_records(self):
        self.zone.get_records()

    def test_get_nameservers(self):
        self.zone.get_nameservers()

    def test_get_zones(self):
        route53 = Route53Connection()
        route53.get_zones()

    def METHOD_NAME(self):
        # Weighted round-robin record sets distinguished by identifier.
        self.zone.add_a('wrr.%s' % self.base_domain, '1.2.3.4',
                        identifier=('foo', '20'))
        self.zone.add_a('wrr.%s' % self.base_domain, '5.6.7.8',
                        identifier=('bar', '10'))
        wrrs = self.zone.find_records(
            'wrr.%s' % self.base_domain,
            'A',
            all=True
        )
        self.assertEqual(len(wrrs), 2)
        self.zone.delete_a('wrr.%s' % self.base_domain, all=True)

    def test_identifiers_lbrs(self):
        # Latency-based record sets distinguished by region identifier.
        self.zone.add_a('lbr.%s' % self.base_domain, '4.3.2.1',
                        identifier=('baz', 'us-east-1'))
        self.zone.add_a('lbr.%s' % self.base_domain, '8.7.6.5',
                        identifier=('bam', 'us-west-1'))
        lbrs = self.zone.find_records(
            'lbr.%s' % self.base_domain,
            'A',
            all=True
        )
        self.assertEqual(len(lbrs), 2)
        self.zone.delete_a('lbr.%s' % self.base_domain,
                           identifier=('bam', 'us-west-1'))
        self.zone.delete_a('lbr.%s' % self.base_domain,
                           identifier=('baz', 'us-east-1'))

    def test_toomany_exception(self):
        self.zone.add_a('exception.%s' % self.base_domain, '4.3.2.1',
                        identifier=('baz', 'us-east-1'))
        self.zone.add_a('exception.%s' % self.base_domain, '8.7.6.5',
                        identifier=('bam', 'us-west-1'))
        self.assertRaises(TooManyRecordsException,
                          lambda: self.zone.get_a('exception.%s' %
                                                  self.base_domain))
        self.zone.delete_a('exception.%s' % self.base_domain, all=True)

    @classmethod
    def tearDownClass(cls):
        cls.zone.delete_a(cls.base_domain)
        cls.zone.delete_cname('www.%s' % cls.base_domain)
        cls.zone.delete_mx(cls.base_domain)
        cls.zone.delete()
@attr(route53=True)
class TestRoute53PrivateZone(unittest.TestCase):
    """Integration test for private hosted zones attached to a VPC."""

    @classmethod
    def setUpClass(cls):
        time_str = str(int(time.time()))
        cls.route53 = Route53Connection()
        cls.base_domain = 'boto-private-zone-test-%s.com' % time_str
        cls.vpc = VPCConnection()
        cls.test_vpc = cls.vpc.create_vpc(cidr_block='10.11.0.0/16')
        # tag the vpc to make it easily identifiable if things go spang
        cls.test_vpc.add_tag("Name", cls.base_domain)
        cls.zone = cls.route53.get_zone(cls.base_domain)
        if cls.zone is not None:
            cls.zone.delete()

    def test_create_private_zone(self):
        # BUG FIX: assign to the class attribute, not a per-instance one.
        # tearDownClass reads cls.zone, so the instance attribute in the
        # original meant the zone created here leaked (never deleted).
        type(self).zone = self.route53.create_hosted_zone(
            self.base_domain, private_zone=True, vpc_id=self.test_vpc.id,
            vpc_region='us-east-1')

    @classmethod
    def tearDownClass(cls):
        if cls.zone is not None:
            cls.zone.delete()
        cls.test_vpc.delete()
if __name__ == '__main__':
unittest.main(verbosity=3) |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 12 17:58:51 2014
@author: david
"""
import numpy as np
try:
    # Python 2 module name; fall back to the Python 3 spelling.
    import copy_reg
except ImportError:
    import copyreg as copy_reg


def pickleSlice(sl):
    """Reduce a slice object to (constructor, args) for pickling.

    The parameter is named ``sl`` (not ``slice``) so the builtin type is
    not shadowed inside the function.
    """
    return unpickleSlice, (sl.start, sl.stop, sl.step)


def unpickleSlice(start, stop, step):
    """Reconstruct a slice object from its pickled components."""
    return slice(start, stop, step)


# Teach pickle how to handle slice objects (they are not picklable by default).
copy_reg.pickle(slice, pickleSlice, unpickleSlice)
def replNoneWith1(n):
    """Return ``n`` unchanged, substituting 1 when ``n`` is None."""
    return 1 if n is None else n
def METHOD_NAME(slicesUsed):
    """Flatten a sequence of slice objects into (start, stop, step) triples.

    A ``None`` step is recorded as 1; a ``None`` input maps to a sentinel
    of three (-1, -1, -1) triples.
    """
    if slicesUsed is None:
        return ((-1, -1, -1), (-1, -1, -1), (-1, -1, -1))
    return tuple(
        (sl.start, sl.stop, 1 if sl.step is None else sl.step)
        for sl in slicesUsed
    )
def _tuplify(var):
    """Coerce ``var`` to a tuple; non-iterable values pass through unchanged.

    EAFP: try the conversion and let tuple() decide what is iterable
    (strings and dicts are iterable, so they become tuples of chars/keys).
    """
    try:
        return tuple(var)
    except TypeError:
        return var
def pack_results(dtype, tIndex, fitResults, fitError=None, startParams=None, slicesUsed=None, resultCode=-1, **kwargs):
    """ Pack fit results into a structured array of the given dtype

    Collects logic from fit factories to a central place which hopefully makes it easier to
    maintain.

    Parameters
    ----------
    dtype : np.dtype
        the numpy dtype of the structured array we want to pack into
    tIndex : int
        the current frame number
    fitResults : np.ndarray
        the fit parameters in the order they are defined in the dtype
    fitError : np.ndarray
        the fit errors in the order they are defined in the dtype
    startParams : np.ndarray, optional
        the start parameters in the order they are defined in the dtype
    slicesUsed : tuple, optional
        a 3-tuple of slice objects (xslice, yslice, zslice) that define the ROI used for this molecule
    resultCode : int, optional
        the result code as returned by the fitting routine
    **kwargs : dict, optional
        any additional information which gets stored in the structured array, either a scalar or a numpy array

    Returns
    -------
    np.recarray
        The packed results array

    TODOS:
        - Support length mismatch on data

    FIXME: This currently uses tuples which is really gross for a number of reasons (e.g. moves what should be a numpy
    low level c loop into python, relies on implicitly coercing types rather than doing it explicitly). For some
    reason it is currently faster than assigning to views into an array even though it really should be quite a
    lot slower. If numpy history is anything to go by, it's also quite likely to break at some point in the future.
    """
    dtype = np.dtype(dtype)
    # -5e3 broadcast over fitResults is the "not available" sentinel for
    # missing errors / start parameters.
    if fitError is None:
        fitError = -5e3 + 0 * fitResults
    if startParams is None:
        startParams = -5e3 + 0 * fitResults
    slicesUsed = METHOD_NAME(slicesUsed)
    # Capture every local by name so dtype field names can be used to look
    # up the matching value; extra per-field data arrives via **kwargs.
    ns = locals()
    ns.update(kwargs)
    return np.array(tuple([_tuplify(ns[n]) for n in dtype.names]), dtype=dtype)
###############################################
# Below are various experimental alternatives to pack_results. They are still a work in progress, but should
# hopefully let us replace some of the tuple madness in the above one. Of the alternatives, _pack_results4, which
# pushes stuff into a pre-allocated array is ~2 times faster than the tuple based code above, but would need quite
# a lot of additional refactoring in the calling code to make it actually work (the exceptions here are the Multifit
# and GPU fitting classes. Punting that to some point in the future for now.
def _pack_results1(dtype, flatdtype, tIndex, fitResults, fitError=None, startParams=None, slicesUsed=None, resultCode=-1, **kwargs):
    # Experimental variant of pack_results: assign each field into a flat
    # (un-nested) dtype and reinterpret the buffer as the nested dtype
    # via .view() at the end.
    dtype = np.dtype(dtype)
    if fitError is None:
        fitError = -5e3 + 0 * fitResults  # -5e3 == "not available" sentinel
    if startParams is None:
        startParams = -5e3 + 0 * fitResults
    slicesUsed = np.ravel(METHOD_NAME(slicesUsed))
    # Field values are looked up by name from the locals plus **kwargs.
    ns = locals()
    ns.update(kwargs)
    res = np.zeros(1, dtype=flatdtype)
    for n in dtype.names:
        res[n] = ns[n]
    return res.view(dtype)
def _pack_results4(out, flat_out, tIndex, fitResults, fitError=None, startParams=None, slicesUsed=None, resultCode=-1,
                   **kwargs):
    # Experimental variant of pack_results that writes into pre-allocated
    # output arrays (out and its flat view flat_out) instead of allocating.
    # Per the module comment above, roughly 2x faster than the tuple-based
    # pack_results, but needs calling-code refactoring to adopt.
    if fitError is None:
        fitError = -5e3 + 0 * fitResults  # -5e3 == "not available" sentinel
    if startParams is None:
        startParams = -5e3 + 0 * fitResults
    slicesUsed = np.ravel(METHOD_NAME(slicesUsed))
    # Field values are looked up by name from the locals plus **kwargs.
    ns = locals()
    ns.update(kwargs)
    for n in out.dtype.names:
        flat_out[n] = ns[n]
    return out
def _pack_results3(dtype, flatdtype, tIndex, fitResults, fitError=None, startParams=None, slicesUsed=None, resultCode=-1,
                   **kwargs):
    # Experimental variant of pack_results: build a flat-dtype array from a
    # tuple of the field values, then reinterpret it as the nested dtype.
    dtype = np.dtype(dtype)
    if fitError is None:
        fitError = -5e3 + 0 * fitResults  # -5e3 == "not available" sentinel
    if startParams is None:
        startParams = -5e3 + 0 * fitResults
    slicesUsed = np.ravel(METHOD_NAME(slicesUsed))
    # Field values are looked up by name from the locals plus **kwargs.
    ns = locals()
    ns.update(kwargs)
    #res = np.zeros(1, dtype=flatdtype)
    #for n in dtype.names:
    #    d = ns[n]
    return np.array(tuple([ns[n] for n in dtype.names]), flatdtype).view(dtype)
def _pack_results2(dtype, tIndex, fitResults, fitError=None, startParams=None, slicesUsed=None, resultCode=-1, **kwargs):
    # Experimental variant of pack_results: assign tuplified field values
    # directly into a freshly allocated nested-dtype array.
    dtype = np.dtype(dtype)
    if fitError is None:
        fitError = -5e3 + 0 * fitResults  # -5e3 == "not available" sentinel
    if startParams is None:
        startParams = -5e3 + 0 * fitResults
    slicesUsed = METHOD_NAME(slicesUsed)
    # Field values are looked up by name from the locals plus **kwargs.
    ns = locals()
    ns.update(kwargs)
    res = np.zeros(1, dtype=dtype)
    for n in dtype.names:
        res[n] = _tuplify(ns[n])
    return res
#generate a flat dtype from a standard nested one (incomplete)
def _gen_flat_dtype(dtype):
dtype = np.dtype(dtype)
out_dt = []
for n in dtype.names:
field_dt = dtype.fields[n][0]
|
298,578 | run | import logging as log
from avocado.core import exceptions
from avocado.utils import cpu
from avocado.utils import process
from virttest import libvirt_version
from virttest import virsh
from virttest.libvirt_xml import vm_xml
from virttest.libvirt_xml import xcepts
from virttest.utils_test import libvirt
# Using as lower capital is not the best way to do, but this is just a
# workaround to avoid changing the entire file.
logging = log.getLogger('avocado.' + __name__)
def get_iothreadpins(vm_name, options):
    """
    Get some iothreadpins info from the guests xml

    :param vm_name: name of the domain to inspect
    :param options: virsh option string; "--config" selects the inactive
        (persistent) xml, otherwise the live xml is used
    :return: the iothreadpins info, or None if the xml has none
    """
    loader = (vm_xml.VMXML.new_from_inactive_dumpxml
              if "--config" in options
              else vm_xml.VMXML.new_from_dumpxml)
    domain_xml = loader(vm_name)
    logging.debug("domxml: %s", domain_xml)
    try:
        return domain_xml.cputune.iothreadpins
    except xcepts.LibvirtXMLNotFoundError:
        return None
def setup_vmxml_before_start(vmxml, params):
    """
    Configure vm xml using given parameters

    :param vmxml: vm xml
    :param params: dict for the test
    """
    iothreads = params.get("iothreads")
    iothreadids = params.get("iothreadids")
    iothreadpins = params.get("iothreadpins")
    if iothreadids:
        ids_xml = vm_xml.VMIothreadidsXML()
        # loop variable renamed so the builtin id() is not shadowed
        ids_xml.iothread = [{'id': thread_id}
                            for thread_id in iothreadids.split()]
        vmxml.iothreadids = ids_xml
    if iothreadpins:
        cputune_xml = vm_xml.VMCPUTuneXML()
        io_pins = []
        # each entry is "<iothread id>:<cpuset>"
        for pins in iothreadpins.split():
            # 'cpu_set' rather than 'cpu': the avocado.utils.cpu module is
            # imported at file level and must not be shadowed here
            thread, cpu_set = pins.split(':')
            io_pins.append({"iothread": thread,
                            "cpuset": cpu_set})
        cputune_xml.iothreadpins = io_pins
        vmxml.cputune = cputune_xml
    if iothreads:
        vmxml.iothreads = int(iothreads)
    logging.debug("Pre-test xml is %s", vmxml)
    vmxml.sync()
def get_dom_option(vm, vm_ref):
    """
    Get the domain option for iothreadpin

    :param vm: vm object
    :param vm_ref: vm reference
    :return: str, vm name or domain id or domain uuid or others
    """
    # Map the symbolic reference to the matching domain attribute;
    # anything unrecognised is passed through verbatim.
    known_refs = {
        "name": vm.name,
        "id": vm.get_id(),      # only valid for a running domain
        "uuid": vm.get_uuid(),
    }
    return known_refs.get(vm_ref, vm_ref)
def process_cpuset(params, test):
    """
    Process cpuset value for some specific tests

    :param params: dict for testing
    :param test: test object
    :return: int, cpuset to be used for iothreadpin
    """
    # NOTE(review): when disallowed_cpuset is off, the raw params value
    # (possibly a string, possibly None) is returned -- confirm callers
    # handle both types.
    cpuset = params.get("cpuset")
    disallowed_cpuset = params.get('disallowed_cpuset', 'no') == 'yes'
    if disallowed_cpuset:
        # Set cpuset to the first cpu id just for testing
        cpuset_cpus_path = '/sys/fs/cgroup/machine.slice/cpuset.cpus'
        logging.debug("Set allowed cpuset to %s", cpuset_cpus_path)
        online_cpu_list = cpu.online_list()
        if cpu.online_count() == 1:
            test.cancel("At least 2 online cpus are needed for this test case.")
        # Restrict the machine slice cgroup to the first online cpu, then
        # return the second one -- pinning to it is now disallowed.
        cmd = "echo %d > %s" % (online_cpu_list[0], cpuset_cpus_path)
        # NOTE(review): METHOD_NAME is presumably process.run -- confirm.
        process.METHOD_NAME(cmd, ignore_status=False, shell=True)
        cpuset = online_cpu_list[1]
    return cpuset
def verify_test_disallowed_cpuset(vm_name, options, test):
    """
    Verify the test for disallowed cpuset case and iothreadpin info should
    not exist

    :param vm_name: vm name
    :param options: iothreadpin options
    :param test: test object
    :raises: test.fail if iothreadpin info still exists in dumpxml
    """
    def _check_iothreadpins():
        # The pin targeted a cpu outside the allowed cgroup set, so no
        # iothreadpin element may appear in the domain xml.
        iothreadpins = get_iothreadpins(vm_name, options)
        if iothreadpins:
            test.fail("iothreadpin info '%s' in guest xml is not expected" % iothreadpins)
        else:
            logging.debug("iothreadpins info does not exist as expected")
    _check_iothreadpins()
    # Also verify the setting does not reappear across a
    # managedsave/start cycle.
    virsh_dargs = {"debug": True, "ignore_status": False}
    virsh.managedsave(vm_name, **virsh_dargs)
    virsh.start(vm_name, **virsh_dargs)
    _check_iothreadpins()
def METHOD_NAME(test, params, env):
    """
    Test command: virsh iothread.

    The command can change the number of iothread.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh iothreadadd operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    libvirt_version.is_libvirt_feature_supported(params)
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("iothread_pre_vm_state")
    command = params.get("iothread_command", "iothread")
    options = params.get("iothread_options")
    status_error = "yes" == params.get("status_error")
    add_iothread_id = params.get("add_iothread_id")
    iothread_id = params.get("iothread_id")
    disallowed_cpuset = params.get("disallowed_cpuset")
    error_msg = params.get("error_msg")
    # Save original configuration (restored in the finally block)
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()
    try:
        if vm.is_alive():
            vm.destroy()
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                raise exceptions.TestSkipError("The current libvirt version"
                                               " doesn't support '%s' option"
                                               % item)
        setup_vmxml_before_start(vmxml, params)
        # Restart, unless that's not our test
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        dom_option = get_dom_option(vm, params.get("iothread_vm_ref"))
        if pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()
        virsh_dargs = {"debug": True}
        if "yes" == params.get("readonly", "no"):
            virsh_dargs.update({"readonly": True})
        cpuset = process_cpuset(params, test)
        if add_iothread_id:
            iothread_id = add_iothread_id
            virsh.iothreadadd(dom_option, add_iothread_id, debug=True, ignore_status=False)
        ret = virsh.iothreadpin(dom_option, iothread_id, cpuset,
                                options, **virsh_dargs)
        if error_msg:
            libvirt.check_result(ret, expected_fails=error_msg)
        else:
            libvirt.check_exit_status(ret, status_error)
        if disallowed_cpuset:
            verify_test_disallowed_cpuset(vm_name, options, test)
        if not status_error:
            # Check domainxml
            iothread_info = get_iothreadpins(vm_name, options)
            logging.debug("iothreadinfo: %s", iothread_info)
            # NOTE(review): cpuset may be an int (from process_cpuset)
            # while the xml values are strings -- confirm the comparisons
            # below see matching types in every configuration.
            for info in iothread_info:
                if info["iothread"] == iothread_id and info["cpuset"] == cpuset:
                    # Find the iothreadpins in domain xml
                    break
                elif iothread_info.index(info) == (len(iothread_info) - 1):
                    # Can not find the iothreadpins at last.
                    # Bug fix: TestFail does not lazily %-format extra
                    # positional args the way logging does, so interpolate
                    # the message explicitly.
                    raise exceptions.TestFail("Failed to add iothread %s in "
                                              "domain xml" % iothread_id)
            # Check iothreadinfo by virsh command
            iothread_info = libvirt.get_iothreadsinfo(dom_option, options)
            logging.debug("iothreadinfo: %s", iothread_info)
            if (iothread_id not in iothread_info or
                    iothread_info[iothread_id] != cpuset):
                raise exceptions.TestFail("Failed to add iothreadpins %s"
                                          % iothread_id)
    finally:
        # Cleanup
        if vm.is_alive():
            vm.destroy()
        orig_config_xml.sync()
298,579 | values | """Manage HTTP Response Headers
Much of this module is red-handedly pilfered from email.message in the stdlib,
so portions are Copyright (C) 2001,2002 Python Software Foundation, and were
written by Barry Warsaw.
"""
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
import re
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
class Headers:
    """Manage a collection of HTTP response headers"""

    def __init__(self, headers=None):
        headers = headers if headers is not None else []
        if type(headers) is not list:
            raise TypeError("Headers must be a list of name/value tuples")
        self._headers = headers
        # Validate eagerly in debug runs only; skipped under python -O.
        if __debug__:
            for k, v in headers:
                self._convert_string_type(k)
                self._convert_string_type(v)

    def _convert_string_type(self, value):
        """Convert/check value type."""
        if type(value) is str:
            return value
        raise AssertionError("Header names/values must be"
                             " of type str (got {0})".format(repr(value)))

    def __len__(self):
        """Return the total number of headers, including duplicates."""
        return len(self._headers)

    def __setitem__(self, name, val):
        """Set the value of a header."""
        # Replace semantics: drop all existing occurrences first.
        del self[name]
        self._headers.append(
            (self._convert_string_type(name), self._convert_string_type(val)))

    def __delitem__(self,name):
        """Delete all occurrences of a header, if present.

        Does *not* raise an exception if the header is missing.
        """
        name = self._convert_string_type(name.lower())
        self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]

    def __getitem__(self,name):
        """Get the first header value for 'name'

        Return None if the header is missing instead of raising an exception.

        Note that if the header appeared multiple times, exactly which
        occurrence gets returned is undefined.  Use get_all() to get all
        the values matching a header field name.
        """
        return self.get(name)

    def __contains__(self, name):
        """Return true if the message contains the header."""
        return self.get(name) is not None

    def get_all(self, name):
        """Return a list of all the values for the named field.

        These will be sorted in the order they appeared in the original header
        list or were added to this instance, and may contain duplicates.  Any
        fields deleted and re-inserted are always appended to the header list.
        If no fields exist with the given name, returns an empty list.
        """
        name = self._convert_string_type(name.lower())
        return [kv[1] for kv in self._headers if kv[0].lower()==name]

    def get(self,name,default=None):
        """Get the first header value for 'name', or return 'default'"""
        name = self._convert_string_type(name.lower())
        for k,v in self._headers:
            if k.lower()==name:
                return v
        return default

    def keys(self):
        """Return a list of all the header field names.

        These will be sorted in the order they appeared in the original header
        list, or were added to this instance, and may contain duplicates.
        Any fields deleted and re-inserted are always appended to the header
        list.
        """
        return [k for k, v in self._headers]

    def METHOD_NAME(self):
        """Return a list of all header values.

        These will be sorted in the order they appeared in the original header
        list, or were added to this instance, and may contain duplicates.
        Any fields deleted and re-inserted are always appended to the header
        list.
        """
        return [v for k, v in self._headers]

    def items(self):
        """Get all the header fields and values.

        These will be sorted in the order they were in the original header
        list, or were added to this instance, and may contain duplicates.
        Any fields deleted and re-inserted are always appended to the header
        list.
        """
        return self._headers[:]

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._headers)

    def __str__(self):
        """str() returns the formatted headers, complete with end line,
        suitable for direct HTTP transmission."""
        return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])

    def __bytes__(self):
        # Headers are latin-1 on the wire per the WSGI/HTTP conventions.
        return str(self).encode('iso-8859-1')

    def setdefault(self,name,value):
        """Return first matching header value for 'name', or 'value'

        If there is no header named 'name', add a new header with name 'name'
        and value 'value'."""
        result = self.get(name)
        if result is None:
            self._headers.append((self._convert_string_type(name),
                self._convert_string_type(value)))
            return value
        else:
            return result

    def add_header(self, _name, _value, **_params):
        """Extended header setting.

        _name is the header field to add.  keyword arguments can be used to set
        additional parameters for the header field, with underscores converted
        to dashes.  Normally the parameter will be added as key="value" unless
        value is None, in which case only the key will be added.

        Example:

        h.add_header('content-disposition', 'attachment', filename='bud.gif')

        Note that unlike the corresponding 'email.message' method, this does
        *not* handle '(charset, language, value)' tuples: all values must be
        strings or None.
        """
        parts = []
        if _value is not None:
            _value = self._convert_string_type(_value)
            parts.append(_value)
        for k, v in _params.items():
            k = self._convert_string_type(k)
            if v is None:
                parts.append(k.replace('_', '-'))
            else:
                v = self._convert_string_type(v)
                parts.append(_formatparam(k.replace('_', '-'), v))
        self._headers.append((self._convert_string_type(_name), "; ".join(parts)))
298,580 | test get by pet id basic types | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import json
import pytest
from async_generator import yield_, async_generator
from azure.core.exceptions import HttpResponseError, ResourceNotFoundError
from xmserrorresponse.aio import XMSErrorResponseExtensions
from xmserrorresponse.models import NotFoundErrorBase, AnimalNotFound, LinkNotFound, PetActionError, PetSadError, PetHungryOrThirstyError
@pytest.fixture
@async_generator
async def client():
    # Async fixture via the async_generator backport: yield_() hands the
    # open client to the test; the async context manager closes it after.
    async with XMSErrorResponseExtensions(base_url="http://localhost:3000") as client:
        await yield_(client)
class TestXmsErrorResponse(object):
    """Integration tests for x-ms-error-response handling.

    All tests run against a local autorest test server (port 3000, via the
    ``client`` fixture) and assert which error model, if any, is attached
    to the raised HttpResponseError.
    """

    @pytest.mark.asyncio
    async def test_get_by_pet_id_success(self, client):
        pet = await client.pet.get_pet_by_id("tommy")
        assert pet.name == "Tommy Tomson"
        await client.pet.get_pet_by_id('django')  # no fail, 202

    @pytest.mark.asyncio
    async def test_get_by_pet_id_discriminator(self, client):
        # The polymorphic discriminator picks the concrete error subclass.
        assert issubclass(AnimalNotFound, NotFoundErrorBase)
        assert issubclass(LinkNotFound, NotFoundErrorBase)

        with pytest.raises(HttpResponseError) as excinfo:
            await client.pet.get_pet_by_id("coyoteUgly")
        assert isinstance(excinfo.value.model, AnimalNotFound)
        assert excinfo.value.model.reason == "the type of animal requested is not available"

        with pytest.raises(HttpResponseError) as excinfo:
            await client.pet.get_pet_by_id("weirdAlYankovic")
        assert isinstance(excinfo.value.model, LinkNotFound)
        assert excinfo.value.model.reason == "link to pet not found"

    @pytest.mark.asyncio
    async def METHOD_NAME(self, client):
        # Responses whose bodies are bare JSON primitives deserialize to
        # no model at all.
        with pytest.raises(Exception) as excinfo:
            await client.pet.get_pet_by_id("ringo")
        assert excinfo.value.model is None  # no model attached
        assert json.loads(excinfo.value.response.text()) == "ringo is missing"

        with pytest.raises(Exception) as excinfo:
            await client.pet.get_pet_by_id("alien123")
        assert excinfo.value.model is None  # no model attached
        assert json.loads(excinfo.value.response.text()) == 123

    @pytest.mark.asyncio
    async def test_do_something_success(self, client):
        result = await client.pet.do_something("stay")
        assert result.action_response is None

    @pytest.mark.asyncio
    async def test_do_something_error(self, client):
        assert issubclass(PetSadError, PetActionError)
        assert issubclass(PetHungryOrThirstyError, PetActionError)

        with pytest.raises(HttpResponseError) as excinfo:
            await client.pet.do_something("jump")
        assert isinstance(excinfo.value.model, PetSadError)
        assert excinfo.value.model.reason == "need more treats"

        with pytest.raises(ResourceNotFoundError) as excinfo:
            await client.pet.do_something("fetch")

    @pytest.mark.asyncio
    async def test_error_deserialization_with_param_name_models(self, client):
        with pytest.raises(HttpResponseError) as excinfo:
            await client.pet.has_models_param()
        assert isinstance(excinfo.value.model, PetSadError)
        assert excinfo.value.status_code == 500

    @pytest.mark.asyncio
    async def test_failsafe_deserialize(self, client):
        # Drive the pipeline by hand so a custom error model class and
        # pre-seeded deserialized data can be fed to failsafe_deserialize.
        from xmserrorresponse.operations._pet_operations import build_do_something_request
        request = build_do_something_request(what_action="jump")
        request.url = client._client.format_url(request.url)
        pipeline_response = await client._client._pipeline.run(request)

        class MyPetSadError(PetSadError):
            def read(self):
                return b"ignore me"

        pipeline_response.context['deserialized_data'] = {
            "reason": "Not OK",
            "errorMessage": "i should be the message",
            "errorType": "my own error type",
            "actionResponse": "hello"
        }

        # add pipeline context with deserialized data and pass to failsafe_deserialize
        # should get a correct model
        error_model = client._deserialize.failsafe_deserialize(MyPetSadError, pipeline_response)
        assert isinstance(error_model, MyPetSadError)
        assert error_model.action_response == "hello"
        assert error_model.error_message == "i should be the message"

        error = HttpResponseError(response=pipeline_response.http_response, model=error_model)
        assert isinstance(error.model, MyPetSadError)
        assert error.model.action_response == "hello"
        assert error.model.error_message == "i should be the message"
298,581 | list | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._available_skus_operations import build_list_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailableSkusOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.databoxedge.v2020_05_01_preview.aio.DataBoxEdgeManagementClient`'s
        :attr:`available_skus` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Autogenerated operations classes accept their collaborators either
        # positionally (client, config, serializer, deserializer) or by
        # keyword.
        # NOTE(review): METHOD_NAME here is expected to be the builtin
        # ``list`` -- confirm against the code generator output.
        input_args = METHOD_NAME(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def METHOD_NAME(self, **kwargs: Any) -> AsyncIterable["_models.DataBoxEdgeSku"]:
        """List all the available Skus and information related to them.

        List all the available Skus and information related to them.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DataBoxEdgeSku or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.databoxedge.v2020_05_01_preview.models.DataBoxEdgeSku]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2020-05-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", "2020-05-01-preview")
        )
        cls: ClsType[_models.DataBoxEdgeSkuList] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the templated URL; subsequent pages follow
            # the service-provided next_link, re-applying the api-version.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.METHOD_NAME.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize("DataBoxEdgeSkuList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    METHOD_NAME.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.DataBoxEdge/availableSkus"}
298,582 | clean output files | '''
Weathering Outputter
'''
import os
from glob import glob
from geojson import dump
from colander import SchemaNode, String, drop
from .outputter import Outputter, BaseOutputterSchema
class BaseMassBalanceOutputter(Outputter):
    """
    Base class for outputters that need to return results of the mass balance:
    i.e. averaged properties of the LEs
    """
    # Units for the mass-balance quantities; 'default' applies to any
    # quantity not listed explicitly.
    units = {'default': 'kg',
             'avg_density': 'kg/m^3',
             'avg_viscosity': 'm^2/s'}

    def gather_mass_balance_data(self, step_num):
        """Return a dict of the mass-balance data for one cached step.

        :param step_num: model time-step index to load from the cache
        """
        # return a json-compatible dict of the mass_balance data
        # only applies to forecast spill_container (Not uncertain)
        sc = self.cache.load_timestep(step_num).items()[0]
        output_info = {'model_time': sc.current_time_stamp}
        output_info.update(sc.mass_balance)
        self.logger.debug(self._pid + 'step_num: {0}'.format(step_num))
        for name, val in output_info.items():
            msg = ('\t{0}: {1}'.format(name, val))
            self.logger.debug(msg)
        return output_info
class WeatheringOutputSchema(BaseOutputterSchema):
    # Serialization schema: output_dir is optional and round-trips on
    # both save and update.
    output_dir = SchemaNode(
        String(), missing=drop, save=True, update=True
    )
class WeatheringOutput(BaseMassBalanceOutputter):
    '''
    class that outputs GNOME weathering results on a time step by time step basis

    The output is the aggregation of properties for all LEs (aka Mass Balance)
    for a particular time step.

    There are a number of different things we would like to graph:
    - Evaporation
    - Dissolution
    - Dissipation
    - Biodegradation
    - ???
    '''
    _schema = WeatheringOutputSchema

    # Fixme: -- this is a do-nothing __init__
    #           only here to document the interface
    #           may need it in the future if we refactor out the output_dir handling
    def __init__(self,
                 output_dir=None,  # default is to not output to file
                 **kwargs):
        '''
        :param str output_dir=None: output directory for the json files.
            If not directory is provided, files will not be written.

        other arguments as defined in the Outputter class
        '''
        super(WeatheringOutput, self).__init__(output_dir=output_dir,
                                               **kwargs)

    def write_output(self, step_num, islast_step=False):
        '''
        Weathering data is only output for forecast spill container, not
        the uncertain spill container. This is because Weathering has its
        own uncertainty and mixing the two was giving weird results. The
        cloned models that are modeling weathering uncertainty do not include
        the uncertain spill container.
        '''
        super(WeatheringOutput, self).write_output(step_num, islast_step)
        # the base class decides whether this step gets written at all
        if not self._write_step:
            return None
        output_info = self.gather_mass_balance_data(step_num)
        # convert to string
        output_info['time_stamp'] = output_info.pop('model_time').isoformat()
        if self.output_dir:
            output_filename = self.output_to_file(output_info, step_num)
            output_info.update({'output_filename': output_filename})
        return output_info

    def output_to_file(self, json_content, step_num):
        """Write one step's mass balance to a json file; return its path."""
        # zero-padded step number so the files sort naturally
        file_format = 'mass_balance_{0:06d}.json'
        filename = os.path.join(self.output_dir,
                                file_format.format(step_num))
        with open(filename, 'w', encoding='utf-8') as outfile:
            dump(json_content, outfile, indent=4)
        return filename

    def METHOD_NAME(self):
        # Remove every json file written by a previous run.
        if self.output_dir:
            files = glob(os.path.join(self.output_dir,
                                      'mass_balance_*.json'))
            for f in files:
                os.remove(f)

    # just use the base class(s) one -- nothing to do here
    # cleaning out the files is done in prepare_for_model_run
    # def rewind(self):
    #     'remove previously written files'
    #     super(WeatheringOutput, self).rewind()
    #     self.clean_output_files()

    def __getstate__(self):
        '''
        This is to support pickle.dumps() inside the uncertainty model
        subprocesses.

        We need to be able to pickle our weathering outputters so that
        our uncertainty subprocesses can send them back to the parent
        process through a message queue.

        And the cache attribute (specifically, the ElementCache.lock
        attribute) can not be pickled, and instead produces a
        RuntimeError.

        (Note: The __setstate__() probably doesn't need to recreate the
               ElementCache since it will be created inside the
               Model.setup_model_run() function.)
        '''
        odict = self.__dict__.copy()  # copy the dict since we change it
        del odict['cache']  # remove cache entry
        return odict
298,583 | initialize process | from System.Text.RegularExpressions import *
from Deadline.Plugins import *
from Deadline.Scripting import *
import _winreg
######################################################################
# This is the function that Deadline calls to get an instance of the
# main DeadlinePlugin class.
######################################################################
def GetDeadlinePlugin():
    """Entry point called by Deadline to obtain the plugin instance."""
    return CelActionPlugin()
def CleanupDeadlinePlugin(deadlinePlugin):
    """Entry point called by Deadline to tear the plugin instance down."""
    deadlinePlugin.Cleanup()
######################################################################
# This is the main DeadlinePlugin class for the CelAction plugin.
######################################################################
class CelActionPlugin(DeadlinePlugin):
    """Deadline render plugin that drives CelAction2D renders."""

    def __init__(self):
        # Register the Deadline plugin-lifecycle callbacks.
        self.InitializeProcessCallback += self.METHOD_NAME
        self.RenderExecutableCallback += self.RenderExecutable
        self.RenderArgumentCallback += self.RenderArgument
        self.StartupDirectoryCallback += self.StartupDirectory

    def Cleanup(self):
        # Drop the callback references so this object (and its handlers)
        # can be released when the plugin is unloaded.
        for stdoutHandler in self.StdoutHandlers:
            del stdoutHandler.HandleCallback

        del self.InitializeProcessCallback
        del self.RenderExecutableCallback
        del self.RenderArgumentCallback
        del self.StartupDirectoryCallback

    def GetCelActionRegistryKey(self):
        """Open (creating it first if needed) CelAction's per-user settings
        registry key with full access, so separator values can be written."""
        # Modify registry for frame separation
        path = r'Software\CelAction\CelAction2D\User Settings'
        _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, path)
        regKey = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, path, 0,
                                 _winreg.KEY_ALL_ACCESS)
        return regKey

    def GetSeparatorValue(self, regKey):
        """Return (use-separator flag, separator string) read from regKey."""
        useSeparator, _ = _winreg.QueryValueEx(
            regKey, 'RenderNameUseSeparator')
        separator, _ = _winreg.QueryValueEx(regKey, 'RenderNameSeparator')
        return useSeparator, separator

    def SetSeparatorValue(self, regKey, useSeparator, separator):
        """Write the separator settings: a DWORD flag plus a string value."""
        _winreg.SetValueEx(regKey, 'RenderNameUseSeparator',
                           0, _winreg.REG_DWORD, useSeparator)
        _winreg.SetValueEx(regKey, 'RenderNameSeparator',
                           0, _winreg.REG_SZ, separator)

    def METHOD_NAME(self):
        """InitializeProcess callback: configure plugin/process settings."""
        # Set the plugin specific settings.
        self.SingleFramesOnly = False

        # Set the process specific settings.
        self.StdoutHandling = True
        self.PopupHandling = True

        # Ignore CelAction pop-up dialogs that would otherwise stall renders.
        self.AddPopupIgnorer(".*Rendering.*")
        self.AddPopupIgnorer(".*AutoRender.*")
        self.AddPopupIgnorer(".*Wait.*")
        self.AddPopupIgnorer(".*Timeline Scrub.*")

        # Push the configured render-name separator into the user registry
        # (flag 1 = use a separator).
        celActionRegKey = self.GetCelActionRegistryKey()
        self.SetSeparatorValue(celActionRegKey, 1, self.GetConfigEntryWithDefault(
            "RenderNameSeparator", ".").strip())

    def RenderExecutable(self):
        # Path-map the configured executable for the current machine.
        return RepositoryUtils.CheckPathMapping(self.GetConfigEntry("Executable").strip())

    def RenderArgument(self):
        """Build the command-line arguments, expanding the frame-range and
        quote tokens embedded in the job's Arguments plugin-info entry."""
        arguments = RepositoryUtils.CheckPathMapping(
            self.GetPluginInfoEntry("Arguments").strip())
        arguments = arguments.replace(
            "<STARTFRAME>", str(self.GetStartFrame()))
        arguments = arguments.replace("<ENDFRAME>", str(self.GetEndFrame()))
        # <STARTFRAME%N> / <ENDFRAME%N> tokens get zero-padded to width N.
        arguments = self.ReplacePaddedFrame(
            arguments, "<STARTFRAME%([0-9]+)>", self.GetStartFrame())
        arguments = self.ReplacePaddedFrame(
            arguments, "<ENDFRAME%([0-9]+)>", self.GetEndFrame())
        arguments = arguments.replace("<QUOTE>", "\"")
        return arguments

    def StartupDirectory(self):
        # Optional working directory for the render process.
        return self.GetPluginInfoEntryWithDefault("StartupDirectory", "").strip()

    def ReplacePaddedFrame(self, arguments, pattern, frame):
        """Replace every occurrence of *pattern* (which captures a padding
        width) with *frame* zero-padded to that width; width 0 means no
        padding.  Uses .NET Regex since this runs under Deadline's runtime."""
        frameRegex = Regex(pattern)
        while True:
            frameMatch = frameRegex.Match(arguments)
            if frameMatch.Success:
                paddingSize = int(frameMatch.Groups[1].Value)
                if paddingSize > 0:
                    padding = StringUtils.ToZeroPaddedString(
                        frame, paddingSize, False)
                else:
                    padding = str(frame)
                arguments = arguments.replace(
                    frameMatch.Groups[0].Value, padding)
            else:
                break
        return arguments
298,584 | init structure | from math import ceil
import numpy as np
from parcels.interaction.neighborsearch.base import BaseSphericalNeighborSearch
from parcels.interaction.neighborsearch.basehash import (
BaseHashNeighborSearch,
hash_split,
)
class HashSphericalNeighborSearch(BaseHashNeighborSearch,
                                  BaseSphericalNeighborSearch):
    """Neighbor search using a hashtable (similar to octtrees).

    Particles are binned into depth/latitude/longitude cells whose ids are
    packed into a single integer hash; neighbor queries only inspect the
    3x3x3 block of cells around a particle.

    Parameters
    ----------
    inter_dist_vert : float
        Interaction distance (vertical) in m.
    inter_dist_horiz : float
        interaction distance (horizontal) in m
    max_depth : float, optional
        Maximum depth of the particles (default is 100000m).
    """

    def __init__(self, inter_dist_vert, inter_dist_horiz,
                 max_depth=100000):
        super().__init__(inter_dist_vert, inter_dist_horiz, max_depth)
        self.METHOD_NAME()

    def _find_neighbors(self, hash_id, coor):
        """Get neighbors from hash_id and location."""
        # Get the neighboring cells.
        neighbor_blocks = geo_hash_to_neighbors(
            hash_id, coor, self._bits, self.inter_arc_dist)
        all_neighbor_points = []

        # Get the particles from the neighboring cells.
        for block in neighbor_blocks:
            try:
                all_neighbor_points.extend(self._hashtable[block])
            except KeyError:
                # Cell is empty: no particle hashed into it.
                pass

        # Filter the candidates by actual (spherical) distance.
        potential_neighbors = np.array(all_neighbor_points, dtype=int)
        return self._get_close_neighbor_dist(coor, potential_neighbors)

    def _values_to_hashes(self, values, active_idx=None):
        """Convert coordinates to cell ids.

        Parameters
        ----------
        values :
            array of positions of particles to convert
            ([depth, lat, lon], # of particles to convert).
        active_idx :
            (Default value = None)

        Returns
        -------
        type
            array of cell ids.
        """
        if active_idx is None:
            active_idx = np.arange(values.shape[1], dtype=int)
        depth = values[0, active_idx]
        lat = values[1, active_idx]
        lon = values[2, active_idx]

        # Southern or Northern hemisphere bit.
        lat_sign = (lat > 0).astype(int)

        # Depth and latitude parts of the cell id (band indices).
        i_depth = np.floor(depth/self.inter_dist_vert).astype(int)
        i_lat = np.floor(np.abs(lat)/self.inter_degree_dist).astype(int)

        # Arc length of the small circle at this latitude band (in units of
        # inter_arc_dist radians); determines how many longitude cells fit.
        circ_small = 2*np.pi*np.cos((i_lat+1)*self.inter_arc_dist)
        n_lon = np.floor(circ_small/self.inter_arc_dist).astype(int)
        n_lon[n_lon < 1] = 1  # always at least one cell near the poles
        d_lon = 360/n_lon

        # Longitude part of the cell id.
        i_lon = np.floor(lon/d_lon).astype(int)

        # Merge the 4 parts of the cell into one id.
        point_hash = i_3d_to_hash(i_depth, i_lat, i_lon, lat_sign, self._bits)
        # Entries outside active_idx are left uninitialized on purpose.
        point_array = np.empty(values.shape[1], dtype=int)
        point_array[active_idx] = point_hash
        return point_array

    def rebuild(self, values, active_mask=-1):
        """Recreate the tree with new values.

        Parameters
        ----------
        values :
            positions of the particles.
        active_mask :
            (Default value = -1)
        """
        super().rebuild(values, active_mask)
        active_idx = self.active_idx

        # Compute the hash values for the active particles only.
        self._particle_hashes = np.empty(self._values.shape[1], dtype=int)
        self._particle_hashes[active_idx] = self._values_to_hashes(
            values[:, active_idx])

        # Create the hashtable: cell id -> array of particle indices.
        self._hashtable = hash_split(self._particle_hashes,
                                     active_idx=active_idx)

        # Keep track of the position of a particle index within a cell.
        self._hash_idx = np.empty_like(self._particle_hashes, dtype=int)
        for idx_array in self._hashtable.values():
            self._hash_idx[idx_array] = np.arange(len(idx_array))

    def METHOD_NAME(self):
        """Initialize the basic tree properties without building"""
        epsilon = 1e-12  # guards ceil() against exact-division edge cases
        R_earth = 6371000  # mean Earth radius in metres
        self.inter_arc_dist = self.inter_dist_horiz/R_earth
        self.inter_degree_dist = 180*self.inter_arc_dist/np.pi
        # Number of cells in each dimension, used to size the bit fields of
        # the packed hash.
        n_lines_depth = int(ceil(
            self.max_depth/self.inter_dist_vert + epsilon))
        n_lines_lat = int(ceil(np.pi/self.inter_arc_dist+epsilon))
        n_lines_lon = int(ceil(2*np.pi/self.inter_arc_dist+epsilon))
        n_bits_lat = ceil(np.log(n_lines_lat)/np.log(2))
        n_bits_lon = ceil(np.log(n_lines_lon)/np.log(2))
        n_bits_depth = ceil(np.log(n_lines_depth)/np.log(2))

        # Bit widths of the (depth, lat, lon) fields in a cell hash.
        self._bits = np.array([n_bits_depth, n_bits_lat, n_bits_lon])
def i_3d_to_hash(i_depth, i_lat, i_lon, lat_sign, bits):
    """Pack depth/latitude/longitude cell indices plus the hemisphere bit
    into a single integer hash.

    Bit layout (from the LSB): 1 hemisphere bit, ``bits[0]`` depth bits,
    ``bits[1]`` latitude bits, then the longitude index.  Works on scalars
    and on numpy arrays alike.
    """
    packed = lat_sign | (i_depth << 1)
    packed = packed | (i_lat << (1 + bits[0]))
    packed = packed | (i_lon << (1 + bits[0] + bits[1]))
    return packed
def geo_hash_to_neighbors(hash_id, coor, bits, inter_arc_dist):
    """Compute the hashes of all neighboring cells in a 3x3x3 neighborhood."""
    # Unpack hemisphere bit plus depth/latitude indices from the hash.  The
    # longitude index is not unpacked: it is recomputed per latitude band
    # from the particle's actual longitude (coor[2]), because each band has
    # a different number of longitude cells.
    lat_sign = hash_id & 0x1
    i_depth = (hash_id >> 1) & ((1 << bits[0])-1)
    i_lat = (hash_id >> (1+bits[0])) & ((1 << bits[1])-1)

    def all_neigh_depth(i_lat, i_lon, lat_sign):
        # Hashes of the three depth layers (below/same/above) of one cell;
        # layers above the surface (new_depth < 0) are skipped.
        hashes = []
        for d_depth in [-1, 0, 1]:
            new_depth = i_depth + d_depth
            if new_depth < 0:
                continue
            hashes.append(
                i_3d_to_hash(new_depth, i_lat, i_lon, lat_sign, bits))
        return hashes

    neighbors = []
    # Loop over lower row, middle row, upper row of latitude bands.
    for i_d_lat in [-1, 0, 1]:
        new_lat_sign = lat_sign
        new_i_lat = i_lat + i_d_lat
        if new_i_lat == -1:
            # Stepping past the equator: stay in band 0 but flip hemisphere.
            new_i_lat = 0
            new_lat_sign = (1-lat_sign)

        # Number of longitude cells in this band (same formula as in
        # _values_to_hashes, so the two stay consistent).
        min_lat = new_i_lat + 1
        circ_small = 2*np.pi*np.cos(min_lat*inter_arc_dist)
        n_new_lon = int(max(1, np.floor(circ_small/inter_arc_dist)))
        d_lon = 360/n_new_lon

        if n_new_lon <= 3:
            # Few cells in the band (near a pole): take them all.
            for new_i_lon in range(n_new_lon):
                neighbors.extend(
                    all_neigh_depth(new_i_lat, new_i_lon, new_lat_sign))
        else:
            # Otherwise take the cell containing the point plus the two
            # adjacent ones, wrapping around the date line.
            start_i_lon = int(np.floor(coor[2]/d_lon))
            for delta_lon in [-1, 0, 1]:
                new_i_lon = (start_i_lon+delta_lon+n_new_lon) % n_new_lon
                neighbors.extend(
                    all_neigh_depth(new_i_lat, new_i_lon, new_lat_sign))
    return neighbors
298,585 | construct data reader | import functools
import operator
import os
import os.path
import sys
import numpy as np
import scipy.special
import pytest
# Bamboo utilities
# Make the shared test helpers (common_python/tools.py) importable by
# prepending the sibling directory to sys.path before importing it.
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.

# Data
np.random.seed(20200909)  # fixed seed: LBANN and NumPy sides must see the same data
_num_samples = 16
_num_channels = 9
_input_size = 5
_hidden_size = 7
# Each flat sample packs the input x followed by the initial hidden state h.
_sample_size = _num_channels*_input_size + _num_channels*_hidden_size
_samples = np.random.uniform(low=-1, high=1, size=(_num_samples,_sample_size))
_samples = _samples.astype(np.float32)
# Sample access functions
def get_sample(index):
    """Return one flat sample row (float32 vector of length _sample_size)."""
    return _samples[index,:]
def num_samples():
    """Return the number of samples in the data set."""
    return _num_samples
def sample_dims():
    """Return the shape of a single (flat) sample."""
    return (_sample_size,)
# ==============================================
# NumPy implementation
# ==============================================

def numpy_gru_cell(x, h, w):
    """NumPy reference for a channel-wise GRU cell.

    A 2-D analogue of torch.nn.GRUCell, applied row by row:
    https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html

    Args:
        x: input, shape (channels, input_size).
        h: hidden state, shape (channels, hidden_size); updated in place.
        w: [W_ih, b_ih, W_hh, b_hh] with W_ih (3H, I), W_hh (3H, H),
           biases (3H,).

    Returns:
        The updated hidden state ``h`` (same array that was passed in).
    """
    hidden_size = h[0].size
    w_ih, b_ih, w_hh, b_hh = w

    # Unroll the GRU over the channel rows.
    for row in range(x.shape[0]):
        gates_in = w_ih @ x[row] + b_ih
        gates_hid = w_hh @ h[row] + b_hh
        # Gate order in the stacked weights: reset, update, new.
        r = scipy.special.expit(gates_in[:hidden_size]
                                + gates_hid[:hidden_size])
        z = scipy.special.expit(gates_in[hidden_size:2*hidden_size]
                                + gates_hid[hidden_size:2*hidden_size])
        n = np.tanh(gates_in[2*hidden_size:] + r*gates_hid[2*hidden_size:])
        h[row] = (1-z)*n + z*h[row]
    return h
# ==============================================
# Setup LBANN experiment
# ==============================================

def setup_experiment(lbann, weekly):
    """Construct LBANN experiment.

    Args:
        lbann (module): Module for LBANN Python frontend
        weekly: unused here; part of the common test-harness signature
            (presumably a weekly-CI flag -- TODO confirm against tools).

    Returns:
        (trainer, model, data_reader, optimizer, nodes) tuple consumed by
        the generated pytest functions.
    """
    # Skip test on non-GPU systems
    # Note: Test requires cuDNN (on GPU) or oneDNN (on CPU).
    ### @todo Assume LBANN has been built with oneDNN?
    if not tools.gpus_per_node(lbann):
        message = f'{os.path.basename(__file__)} requires cuDNN or oneDNN'
        print('Skip - ' + message)
        pytest.skip(message)

    # Half the data set per mini-batch.
    mini_batch_size = num_samples() // 2
    trainer = lbann.Trainer(mini_batch_size)
    model = construct_model(lbann)
    data_reader = METHOD_NAME(lbann)
    optimizer = lbann.SGD()
    return trainer, model, data_reader, optimizer, None # Don't request any specific number of nodes
def construct_model(lbann):
    """Construct LBANN model.

    Builds a single channel-wise GRU cell, evaluates its L2 norm as the
    objective, and attaches callbacks that compare the metric against the
    NumPy reference implementation and run gradient checking.

    Args:
        lbann (module): Module for LBANN Python frontend
    """
    from lbann.modules.rnn import ChannelwiseGRU

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(initializer=lbann.ConstantInitializer(value=0.0),
                              name='input')
    # NOTE: 'inital_hidden' typo kept as-is -- it is a runtime layer name.
    h_weights = lbann.Weights(initializer=lbann.ConstantInitializer(value=0.0),
                              name='inital_hidden')
    input_ = lbann.Input(data_field='samples')
    # Split each flat sample into the x block and the h block.
    input_slice = lbann.Slice(
        input_,
        slice_points=[0, _num_channels*_input_size, _sample_size],
    )
    x = lbann.Reshape(input_slice, dims=[_num_channels,_input_size], name="input_reshape")
    x = lbann.Sum(x, lbann.WeightsLayer(weights=x_weights, dims=[_num_channels,_input_size]), name="input_sum")
    h = lbann.Reshape(input_slice, dims=[_num_channels,_hidden_size],name="hidden_reshape")
    h = lbann.Sum(h, lbann.WeightsLayer(weights=h_weights, dims=[_num_channels,_hidden_size]), name="input_hidden_sum")
    x_lbann = x
    h_lbann = h

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # Weights: shared between the LBANN layer and the NumPy reference so
    # both compute the same function.
    rnn_weights_numpy = []
    ih_matrix = np.random.uniform(
        low=-1,
        high=1,
        size=(3*_hidden_size,_input_size),
    )
    hh_matrix = np.random.uniform(
        low=-1,
        high=1,
        size=(3*_hidden_size,_hidden_size),
    )
    ih_bias = np.random.uniform(low=-1, high=1, size=(3*_hidden_size,))
    hh_bias = np.random.uniform(low=-1, high=1, size=(3*_hidden_size,))
    rnn_weights_numpy.extend([ih_matrix, ih_bias, hh_matrix, hh_bias])
    # Fortran order matches LBANN's expected weight layout.
    rnn_weights_lbann = [
        lbann.Weights(
            initializer=lbann.ValueInitializer(
                values=np.nditer(w, order='F')))
        for w in rnn_weights_numpy
    ]

    # LBANN implementation
    x = x_lbann
    h = h_lbann
    channelwise_GRU_cell = ChannelwiseGRU(num_channels=_num_channels,
                                          size=_hidden_size,
                                          weights=rnn_weights_lbann)
    y = channelwise_GRU_cell(x, h)
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name="Multi-channel, Unidirectional, GRU Cell"))

    # NumPy implementation: expected metric value averaged over all samples.
    vals = []
    for i in range(num_samples()):
        input_ = get_sample(i).astype(np.float64)
        x = input_[:_num_channels*_input_size].reshape((_num_channels,_input_size))
        h = input_[_num_channels*_input_size:].reshape((_num_channels,_hidden_size))
        y = numpy_gru_cell(x, h, rnn_weights_numpy)
        z = tools.numpy_l2norm2(y)
        vals.append(z)
    val = np.mean(vals)
    # Tolerance scaled to single-precision round-off.
    tol = 8 * val * np.finfo(np.float32).eps

    callbacks.append(lbann.CallbackPrintModelDescription())
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metrics[-1].name,
        lower_bound=val-tol,
        upper_bound=val+tol,
        error_on_failure=True,
        execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------
    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------
    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
def METHOD_NAME(lbann):
    """Construct Protobuf message for Python data reader.

    The Python data reader will import the current Python file to
    access the sample access functions.

    Args:
        lbann (module): Module for LBANN Python frontend

    Returns:
        lbann.reader_pb2.DataReader with train and test readers that both
        point back at this module's get_sample/num_samples/sample_dims.
    """
    # Note: The training data reader should be removed when
    # https://github.com/LLNL/lbann/issues/1098 is resolved.
    message = lbann.reader_pb2.DataReader()
    message.reader.extend([
        tools.create_python_data_reader(
            lbann,
            current_file,
            'get_sample',
            'num_samples',
            'sample_dims',
            'train'
        )
    ])
    message.reader.extend([
        tools.create_python_data_reader(
            lbann,
            current_file,
            'get_sample',
            'num_samples',
            'sample_dims',
            'test'
        )
    ])
    return message
# ==============================================
# Setup PyTest
# ==============================================

# Create test functions that can interact with PyTest.  tools.create_tests
# returns one function per configuration; exporting them into this module's
# globals lets pytest discover them by name.
for _test_func in tools.create_tests(setup_experiment, __file__):
    globals()[_test_func.__name__] = _test_func
298,586 | alias txn | # Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable, List, Optional, Sequence, Tuple
import attr
from synapse.api.errors import SynapseError
from synapse.storage.database import LoggingTransaction
from synapse.storage.databases.main import CacheInvalidationWorkerStore
from synapse.types import RoomAlias
from synapse.util.caches.descriptors import cached
@attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomAliasMapping:
    """An alias -> room mapping, plus servers that may be used to join."""
    room_id: str
    room_alias: str
    servers: List[str]
class DirectoryWorkerStore(CacheInvalidationWorkerStore):
    """Storage for room-alias <-> room-id mappings (tables ``room_aliases``
    and ``room_alias_servers``)."""

    async def get_association_from_room_alias(
        self, room_alias: RoomAlias
    ) -> Optional[RoomAliasMapping]:
        """Gets the room_id and server list for a given room_alias

        Args:
            room_alias: The alias to translate to an ID.

        Returns:
            The room alias mapping or None if no association can be found.
        """
        room_id = await self.db_pool.simple_select_one_onecol(
            "room_aliases",
            {"room_alias": room_alias.to_string()},
            "room_id",
            allow_none=True,
            desc="get_association_from_room_alias",
        )

        if not room_id:
            return None

        # An alias row with no server rows is treated as no association.
        servers = await self.db_pool.simple_select_onecol(
            "room_alias_servers",
            {"room_alias": room_alias.to_string()},
            "server",
            desc="get_association_from_room_alias",
        )

        if not servers:
            return None

        return RoomAliasMapping(room_id, room_alias.to_string(), servers)

    async def get_room_alias_creator(self, room_alias: str) -> str:
        """Return the user_id recorded as the creator of the given alias."""
        return await self.db_pool.simple_select_one_onecol(
            table="room_aliases",
            keyvalues={"room_alias": room_alias},
            retcol="creator",
            desc="get_room_alias_creator",
        )

    @cached(max_entries=5000)
    async def get_aliases_for_room(self, room_id: str) -> Sequence[str]:
        """Return all aliases pointing at the given room (cached)."""
        return await self.db_pool.simple_select_onecol(
            "room_aliases",
            {"room_id": room_id},
            "room_alias",
            desc="get_aliases_for_room",
        )

    async def create_room_alias_association(
        self,
        room_alias: RoomAlias,
        room_id: str,
        servers: Iterable[str],
        creator: Optional[str] = None,
    ) -> None:
        """Creates an association between a room alias and room_id/servers

        Args:
            room_alias: The alias to create.
            room_id: The target of the alias.
            servers: A list of servers through which it may be possible to join the room
            creator: Optional user_id of creator.

        Raises:
            SynapseError: 409 if the alias already exists.
        """

        def METHOD_NAME(txn: LoggingTransaction) -> None:
            # Insert the alias row; a duplicate alias violates the table's
            # uniqueness and surfaces below as an IntegrityError.
            self.db_pool.simple_insert_txn(
                txn,
                "room_aliases",
                {
                    "room_alias": room_alias.to_string(),
                    "room_id": room_id,
                    "creator": creator,
                },
            )

            self.db_pool.simple_insert_many_txn(
                txn,
                table="room_alias_servers",
                keys=("room_alias", "server"),
                values=[(room_alias.to_string(), server) for server in servers],
            )

            # Invalidate the per-room alias cache on this and other workers.
            self._invalidate_cache_and_stream(
                txn, self.get_aliases_for_room, (room_id,)
            )

        try:
            await self.db_pool.runInteraction(
                "create_room_alias_association", METHOD_NAME
            )
        except self.database_engine.module.IntegrityError:
            raise SynapseError(
                409, "Room alias %s already exists" % room_alias.to_string()
            )

    async def delete_room_alias(self, room_alias: RoomAlias) -> Optional[str]:
        """Delete an alias; returns the room_id it pointed at, if any."""
        room_id = await self.db_pool.runInteraction(
            "delete_room_alias", self._delete_room_alias_txn, room_alias
        )

        return room_id

    def _delete_room_alias_txn(
        self, txn: LoggingTransaction, room_alias: RoomAlias
    ) -> Optional[str]:
        # Look up the target room first so the cache for it can be
        # invalidated after the rows are removed.
        txn.execute(
            "SELECT room_id FROM room_aliases WHERE room_alias = ?",
            (room_alias.to_string(),),
        )

        res = txn.fetchone()
        if res:
            room_id = res[0]
        else:
            return None

        txn.execute(
            "DELETE FROM room_aliases WHERE room_alias = ?", (room_alias.to_string(),)
        )

        txn.execute(
            "DELETE FROM room_alias_servers WHERE room_alias = ?",
            (room_alias.to_string(),),
        )

        self._invalidate_cache_and_stream(txn, self.get_aliases_for_room, (room_id,))

        return room_id

    async def update_aliases_for_room(
        self,
        old_room_id: str,
        new_room_id: str,
        creator: Optional[str] = None,
    ) -> None:
        """Repoint all of the aliases for a given room, to a different room.

        Args:
            old_room_id:
            new_room_id:
            creator: The user to record as the creator of the new mapping.
                If None, the creator will be left unchanged.
        """

        def _update_aliases_for_room_txn(txn: LoggingTransaction) -> None:
            # Optionally rewrite the creator column in the same UPDATE.
            update_creator_sql = ""
            sql_params: Tuple[str, ...] = (new_room_id, old_room_id)
            if creator:
                update_creator_sql = ", creator = ?"
                sql_params = (new_room_id, creator, old_room_id)

            sql = "UPDATE room_aliases SET room_id = ? %s WHERE room_id = ?" % (
                update_creator_sql,
            )
            txn.execute(sql, sql_params)
            # Both rooms' alias lists changed.
            self._invalidate_cache_and_stream(
                txn, self.get_aliases_for_room, (old_room_id,)
            )
            self._invalidate_cache_and_stream(
                txn, self.get_aliases_for_room, (new_room_id,)
            )

        await self.db_pool.runInteraction(
            "_update_aliases_for_room_txn", _update_aliases_for_room_txn
        )
class DirectoryStore(DirectoryWorkerStore):
    # Master-process store; currently adds nothing beyond the worker store.
    pass
298,587 | dirichlet | #!/usr/bin/env python3
#
import argparse
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
# Decorator: marks that the decorated function takes Cartesian coordinate points
from fealpy.decorator import cartesian
# Mesh factory: generates meshes on commonly used simple domains
from fealpy.mesh import MeshFactory as MF
from fealpy.mesh import HalfEdgeMesh2d
# Time discretization on a uniform partition
from fealpy.timeintegratoralg import UniformTimeLine
# Heat-conduction PDE model
from fealpy.pde.heatequation_model_2d import ExpExpData
# Lagrange finite element space
from fealpy.functionspace import LagrangeFiniteElementSpace
# Dirichlet boundary condition
from fealpy.boundarycondition import DirichletBC
from fealpy.tools.show import showmultirate
# solver
from scipy.sparse.linalg import spsolve
# Copy objects
import copy
## Command-line argument parsing
parser = argparse.ArgumentParser(description=
"""
三角形网格自适应有限元方法求解热传导方程
""")
parser.add_argument('--ns',
default=10, type=int,
help='空间各个方向剖分段数, 默认剖分 10 段.')
parser.add_argument('--nt',
default=100, type=int,
help='时间剖分段数,默认剖分 100 段.')
parser.add_argument('--tol',
default=0.05, type=float,
help='自适应加密停止阈值,默认设定为 0.05.')
parser.add_argument('--rtheta',
default=0.7, type=float,
help='自适应加密参数,默认设定为 0.7.')
parser.add_argument('--ctheta',
default=0.3, type=float,
help='自适应粗化参数,默认设定为 0.3.')
args = parser.parse_args()
ns = args.ns
nt = args.nt
tol = args.tol
rtheta = args.rtheta
ctheta = args.ctheta
# Problem data and meshes.
pde = ExpExpData()
domain = pde.domain()
c = pde.diffusionCoefficient

tmesh = UniformTimeLine(0, 1, nt)  # uniform partition of the time interval
smesh = MF.boxmesh2d(domain, nx=ns, ny=ns, meshtype='tri')
smesh = HalfEdgeMesh2d.from_mesh(smesh, NV=3)  # half-edge structure for the triangle mesh

# Snapshot of the initial mesh.
smesh.add_plot(plt)
plt.savefig('./test-' + str(0) + '.png')
plt.close()
i = 0  # running counter for the mesh-snapshot file names
while True:
    # Adapt the initial mesh until the recovery-based error estimate of the
    # interpolated initial value drops below tol.
    space = LagrangeFiniteElementSpace(smesh, p=1)  # linear element space
    # FE interpolant of the initial value on the current mesh.
    uh0 = space.interpolation(pde.init_value)
    eta = space.recovery_estimate(uh0, method='area_harmonic')
    err = np.sqrt(np.sum(eta**2))
    if err < tol:
        break
    isMarkedCell = smesh.refine_marker(eta, rtheta, method='L2')
    smesh.refine_triangle_rg(isMarkedCell)
    i += 1
    smesh.add_plot(plt)
    plt.savefig('./test-' + str(i+1) + '.png')
    plt.close()

# Rebuild the space and initial value on the final adapted mesh.
space = LagrangeFiniteElementSpace(smesh, p=1)
uh0 = space.interpolation(pde.init_value)
for j in range(0, nt):
    # Advance to the next time level t1.
    t1 = tmesh.next_time_level()
    print("t1=", t1)

    while True:
        # FE solution at the next time level.
        uh1 = space.function()
        A = c*space.stiff_matrix()  # stiffness matrix
        M = space.mass_matrix()  # mass matrix
        dt = tmesh.current_time_step_length()  # time-step length
        G = M + dt*A  # backward-Euler iteration matrix

        # Right-hand side at time level t1.
        @cartesian
        def source(p):
            return pde.source(p, t1)
        F = space.source_vector(source)
        F *= dt
        F += M@uh0

        # Dirichlet boundary condition at time level t1.
        @cartesian
        def METHOD_NAME(p):
            return pde.METHOD_NAME(p, t1)
        bc = DirichletBC(space, METHOD_NAME)
        GD, F = bc.apply(G, F, uh1)

        # Solve the algebraic system.
        uh1[:] = spsolve(GD, F)

        eta = space.recovery_estimate(uh1, method='area_harmonic')
        err = np.sqrt(np.sum(eta**2))
        print('errrefine', err)
        if err < tol:
            break
        else:
            # Refine and interpolate: transfer uh0 to the refined mesh
            # (new nodes get the average of their parent edge's endpoints),
            # then re-solve this time step on the finer mesh.
            NN0 = smesh.number_of_nodes()
            edge = smesh.entity('edge')
            isMarkedCell = smesh.refine_marker(eta, rtheta, method='L2')
            smesh.refine_triangle_rg(isMarkedCell)
            i += 1
            smesh.add_plot(plt)
            plt.savefig('./test-'+str(i+1)+'.png')
            plt.close()
            space = LagrangeFiniteElementSpace(smesh, p=1)
            print('refinedof', space.number_of_global_dofs())
            uh00 = space.function()
            nn2e = smesh.newnode2edge
            uh00[:NN0] = uh0
            uh00[NN0:] = np.average(uh0[edge[nn2e]], axis=-1)
            uh0 = space.function()
            uh0[:] = uh00

    # Coarsen the mesh and interpolate: keep uh1 values at retained nodes.
    isMarkedCell = smesh.refine_marker(eta, ctheta, 'COARSEN')
    smesh.coarsen_triangle_rg(isMarkedCell)
    i += 1
    smesh.add_plot(plt)
    plt.savefig('./test-'+str(i+1)+'.png')
    plt.close()
    space = LagrangeFiniteElementSpace(smesh, p=1)
    print('coarsendof', space.number_of_global_dofs())
    uh2 = space.function()
    retain = smesh.retainnode
    uh2[:] = uh1[retain]
    uh1 = space.function()
    uh0 = space.function()
    uh1[:] = uh2

    # Error at time level t1 against the exact solution.
    @cartesian
    def solution(p):
        return pde.solution(p, t1)
    error = space.integralalg.error(solution, uh1)
    print("error:", error)

    # Plot the numerical solution at selected time levels.
    if (t1 ==0.01) | (t1 == 0.49) | (t1==0.99):
        fig = plt.figure()
        axes = fig.add_subplot(1, 1, 1, projection='3d')
        uh1.add_plot(axes, cmap='rainbow')

    uh0[:] = uh1
    uh1[:] = 0.0

    # Advance one time level.
    tmesh.advance()

plt.show()
298,588 | test standard item access throws on unknown | from unittesting import DeferrableTestCase
from SublimeLinter.lint.linter import LintMatch
class TestLooseLintMatch(DeferrableTestCase):
    """Pin LintMatch's hybrid interface: it behaves like a dict, supports
    attribute access with defaults for well-known keys, and unpacks like a
    (named)tuple -- while remaining mutable."""

    def test_attribute_access(self):
        # Every constructor kwarg is readable as an attribute.
        m = object()
        match = {
            "match": m,
            "line": 1,
            "col": 2,
            "error": "error_txt",
            "warning": "warning_txt",
            "message": "message_txt",
            "near": "near_txt"
        }

        rv = LintMatch(**match)

        self.assertEqual(rv.match, m)
        self.assertEqual(rv.line, 1)
        self.assertEqual(rv.col, 2)
        self.assertEqual(rv.error, "error_txt")
        self.assertEqual(rv.warning, "warning_txt")
        self.assertEqual(rv.message, "message_txt")
        self.assertEqual(rv.near, "near_txt")

    def test_attribute_access_returns_defaults_for_missing_common_names(self):
        # Well-known keys default to None (message defaults to '').
        rv = LintMatch()
        for k in (
            "match", "line", "col", "error", "warning", "message", "near",
            "filename", "error_type", "code", "end_line", "end_col",
        ):
            self.assertEqual(getattr(rv, k), '' if k == 'message' else None)

    def test_unknown_keys_raise_on_attribute_access(self):
        # Unlike the well-known keys, arbitrary names raise AttributeError.
        rv = LintMatch()

        try:
            rv.foo
        except AttributeError as e:
            self.assertEqual(str(e), "'LintMatch' object has no attribute 'foo'")
        except Exception:
            self.fail('Should have thrown AttributeError.')
        else:
            self.fail('Should have thrown AttributeError.')

    def test_self_repr(self):
        # repr round-trips through eval.
        rv = LintMatch(foo='bar')
        self.assertEqual(str(rv), "LintMatch({'foo': 'bar'})")
        self.assertEqual(eval(repr(rv)), rv)

    def test_copy_lint_match(self):
        # copy() preserves both the contents and the LintMatch type.
        rv = LintMatch(foo='bar')
        self.assertEqual(rv.copy(), rv)
        self.assertEqual(type(rv.copy()), LintMatch)

    def test_double_star_unpacking_to_dict(self):
        m = object()
        match = {
            "match": m,
            "line": 1,
            "col": 2,
            "error": "error_txt",
            "warning": "warning_txt",
            "message": "message_txt",
            "near": "near_txt"
        }
        expected = LintMatch(match)
        actual = dict(**expected)
        self.assertEqual(actual, expected)

    def test_tuple_like_unpacking(self):
        # Iteration yields the seven legacy tuple fields in order.
        m = object()
        match = {
            "match": m,
            "line": 1,
            "col": 2,
            "error": "error_txt",
            "warning": "warning_txt",
            "message": "message_txt",
            "near": "near_txt"
        }
        rv = LintMatch(**match)

        match, line, col, error, warning, message, near = rv

        self.assertEqual(match, m)
        self.assertEqual(line, 1)
        self.assertEqual(col, 2)
        self.assertEqual(error, "error_txt")
        self.assertEqual(warning, "warning_txt")
        self.assertEqual(message, "message_txt")
        self.assertEqual(near, "near_txt")

    def test_tuple_like_index_access(self):
        # Indexes 0..6 map to the legacy tuple fields; 7 is out of range.
        m = object()
        match = {
            "match": m,
            "line": 1,
            "col": 2,
            "error": "error_txt",
            "warning": "warning_txt",
            "message": "message_txt",
            "near": "near_txt"
        }
        rv = LintMatch(**match)

        self.assertEqual(rv[0], m)
        self.assertEqual(rv[1], 1)
        self.assertEqual(rv[2], 2)
        self.assertEqual(rv[3], "error_txt")
        self.assertEqual(rv[4], "warning_txt")
        self.assertEqual(rv[5], "message_txt")
        self.assertEqual(rv[6], "near_txt")

        self.assertRaises(IndexError, lambda: rv[7])

    def test_namedtuple_like_mutating(self):
        rv = LintMatch({'foo': 'bar'})
        rv2 = rv._replace(foo='baz')
        self.assertEqual(rv2.foo, 'baz')
        # unlike namedtuple LintMatch is mutable: _replace mutates in place.
        self.assertEqual(rv.foo, 'baz')

    def test_standard_items_access(self):
        m = object()
        match = {
            "match": m,
            "line": 1,
            "col": 2,
            "error": "error_txt",
            "warning": "warning_txt",
            "message": "message_txt",
            "near": "near_txt"
        }
        rv = LintMatch(**match)

        self.assertEqual(rv['match'], m)
        self.assertEqual(rv['line'], 1)
        self.assertEqual(rv['col'], 2)
        self.assertEqual(rv['error'], "error_txt")
        self.assertEqual(rv['warning'], "warning_txt")
        self.assertEqual(rv['message'], "message_txt")
        self.assertEqual(rv['near'], "near_txt")

    def METHOD_NAME(self):
        # Item access (unlike attribute access) has no defaults.
        rv = LintMatch()
        self.assertRaises(KeyError, lambda: rv['line'])

    def test_create_from_tuple(self):
        # Positional construction follows the legacy tuple field order.
        m = object()
        match = (m, 1, 2, "error_txt", "warning_txt", "message_txt", "near_txt")
        actual = LintMatch(*match)
        expected = LintMatch({
            "match": m,
            "line": 1,
            "col": 2,
            "error": "error_txt",
            "warning": "warning_txt",
            "message": "message_txt",
            "near": "near_txt"
        })
        self.assertEqual(actual, expected)
298,589 | validate handles | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import unittest
import pytest
from azure.storage.fileshare.aio import ShareServiceClient
from devtools_testutils.aio import recorded_by_proxy_async
from devtools_testutils.storage.aio import AsyncStorageRecordedTestCase
from settings.testcase import FileSharePreparer
# ------------------------------------------------------------------------------
TEST_SHARE_NAME = 'test-share'  # pre-provisioned share the recorded tests use
# ------------------------------------------------------------------------------
class TestStorageHandleAsync(AsyncStorageRecordedTestCase):
    """Recorded (playback-only) tests for listing and closing SMB file
    handles.  The handles were opened manually against a live account, so
    these tests only run when recording or when playing back in CI."""

    def _setup(self, storage_account, storage_account_key):
        # Build a share service client against the account's file endpoint.
        file_url = self.account_url(storage_account, "file")
        credentials = storage_account_key
        self.fsc = ShareServiceClient(account_url=file_url, credential=credentials)

    # --Helpers-----------------------------------------------------------------
    def METHOD_NAME(self, handles):
        """Assert the listed handle collection is non-empty and the first
        handle carries the expected identifying fields."""
        # Assert
        assert handles is not None
        assert len(handles) >= 1
        assert handles[0] is not None

        # verify basic fields
        # path may or may not be present
        # last_connect_time_string has been missing in the test
        assert handles[0].id is not None
        assert handles[0].file_id is not None
        assert handles[0].parent_id is not None
        assert handles[0].session_id is not None
        assert handles[0].client_ip is not None
        assert handles[0].open_time is not None

    @pytest.mark.playback_test_only
    @FileSharePreparer()
    @recorded_by_proxy_async
    async def test_close_single_handle(self, **kwargs):
        storage_account_name = kwargs.pop("storage_account_name")
        storage_account_key = kwargs.pop("storage_account_key")

        # don't run live, since the test set up was highly manual
        # only run when recording, or playing back in CI
        self._setup(storage_account_name, storage_account_key)
        share = self.fsc.get_share_client(TEST_SHARE_NAME)
        root = share.get_directory_client()
        handles = []
        async for handle in root.list_handles(recursive=True):
            handles.append(handle)
        self.METHOD_NAME(handles)

        # Act: close_handle must reject the '*' close-all marker...
        with pytest.raises(ValueError):
            await root.close_handle('*')

        # ...but accept a concrete handle.
        handles_info = await root.close_handle(handles[0])

        # Assert 1 handle has been closed
        assert 1 == handles_info['closed_handles_count']
        assert handles_info['failed_handles_count'] == 0

    @pytest.mark.playback_test_only
    @FileSharePreparer()
    @recorded_by_proxy_async
    async def test_close_all_handle(self, **kwargs):
        storage_account_name = kwargs.pop("storage_account_name")
        storage_account_key = kwargs.pop("storage_account_key")

        # don't run live, since the test set up was highly manual
        # only run when recording, or playing back in CI
        self._setup(storage_account_name, storage_account_key)
        share = self.fsc.get_share_client(TEST_SHARE_NAME)
        root = share.get_directory_client()
        handles = []
        async for handle in root.list_handles(recursive=True):
            handles.append(handle)
        self.METHOD_NAME(handles)

        # Act
        handles_info = await root.close_all_handles(recursive=True)

        # Assert at least 1 handle has been closed
        assert handles_info['closed_handles_count'] > 1
        assert handles_info['failed_handles_count'] == 0

    @pytest.mark.playback_test_only
    @FileSharePreparer()
    @recorded_by_proxy_async
    async def test_list_handles_access_rights(self, **kwargs):
        storage_account_name = kwargs.pop("storage_account_name")
        storage_account_key = kwargs.pop("storage_account_key")

        # don't run live, since the test set up was highly manual
        # only run when recording, or playing back in CI
        self._setup(storage_account_name, storage_account_key)
        share = self.fsc.get_share_client('testshare')
        root = share.get_directory_client('testdir')
        file_client = root.get_file_client('testfile.txt')

        # Act
        handles = []
        async for handle in file_client.list_handles():
            handles.append(handle)

        # Assert
        self.METHOD_NAME(handles)
        # Bug fix: this line was previously a bare comparison
        # (`handles[0]['access_rights'][0] == 'Write'`) whose result was
        # discarded, so the access-rights check never actually ran.  Assert
        # it, using attribute access like the other handle fields above.
        assert handles[0].access_rights[0] == 'Write'
# ------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main() |
298,590 | test validate missing config item | # Python libs
import logging
import pytest
# Salt libs
from salt.beacons import twilio_txt_msg
# Salt testing libs
from tests.support.mock import MagicMock, patch
try:
import twilio
# Grab version, ensure elements are ints
twilio_version = tuple(int(x) for x in twilio.__version_info__)
if twilio_version > (5,):
TWILIO_5 = False
else:
TWILIO_5 = True
HAS_TWILIO = True
except ImportError:
HAS_TWILIO = False
log = logging.getLogger(__name__)
pytestmark = [
pytest.mark.skipif(HAS_TWILIO is False, reason="twilio.rest is not available"),
]
class MockTwilioRestException(Exception):
    """Stand-in for twilio's TwilioRestException used by the beacon tests."""

    def __init__(self):
        # The exception message doubles as the base Exception argument.
        self.msg = "Exception error"
        super().__init__(self.msg)
        self.code = "error code"
        self.status = "Not send"
class MockMessages:
    """Stand-in for the twilio messages resource."""

    # Class-level toggle: when set to 1, ``create`` simulates a REST failure.
    flag = None

    def __init__(self):
        self.sid = "011"
        self.price = "200"
        self.price_unit = "1"
        self.status = "Sent"
        self.num_segments = "2"
        self.num_media = "0"
        self.body = None
        self.date_sent = "01-01-2015"
        self.date_created = "01-01-2015"
        self.to = None
        self.from_ = None

    def create(self, body, to, from_):
        """Pretend to send an SMS; raise when ``flag`` is 1."""
        if self.flag == 1:
            raise MockTwilioRestException()
        sent = MockMessages()
        sent.body = body
        sent.to = to
        sent.from_ = from_
        return sent

    def list(self, to):
        """Return a single canned message."""
        return [MockMessages()]

    def delete(self):
        """Pretend to delete a message; always succeeds."""
        return None
class MockSMS:
    """
    Mock SMS class

    Mirrors the twilio 5.x client layout, where messages live under
    ``client.sms.messages``.
    """

    def __init__(self):
        self.messages = MockMessages()
class MockTwilioRestClient:
    """
    Mock TwilioRestClient class

    twilio 5.x exposed messages under ``client.sms``; newer versions
    expose ``client.messages`` directly, so the mock follows the
    module-level TWILIO_5 flag.
    """

    def __init__(self):
        if TWILIO_5:
            self.sms = MockSMS()
        else:
            self.messages = MockMessages()
@pytest.fixture
def configure_loader_modules():
    """Provide an empty loader configuration for the beacon module."""
    return {twilio_txt_msg: {}}
def test_validate_dictionary_config():
    """
    Test that a bare dictionary (non-list) configuration is rejected.
    """
    config = {}
    ret = twilio_txt_msg.validate(config)
    assert ret == (False, "Configuration for twilio_txt_msg beacon must be a list.")
def test_validate_empty_config():
    """
    Test that an empty config item is rejected (all required keys missing).
    """
    config = [{}]
    ret = twilio_txt_msg.validate(config)
    assert ret == (
        False,
        "Configuration for twilio_txt_msg "
        "beacon must contain account_sid, "
        "auth_token and twilio_number items.",
    )
def METHOD_NAME():
    """
    Test a configuration missing the required ``auth_token`` item.
    """
    config = [
        {
            "account_sid": "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
            "twilio_number": "+15555555555",
        }
    ]
    ret = twilio_txt_msg.validate(config)
    assert ret == (
        False,
        "Configuration for twilio_txt_msg "
        "beacon must contain account_sid, "
        "auth_token and twilio_number items.",
    )
def test_receive_message():
    """
    Test receiving a message via the beacon with a mocked REST client.
    """
    config = [
        {
            "account_sid": "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
            "auth_token": "my_token",
            "twilio_number": "+15555555555",
        }
    ]
    ret = twilio_txt_msg.validate(config)
    assert ret == (True, "Valid beacon configuration")
    # Values mirror the canned attributes set in MockMessages.__init__.
    _expected_return = [
        {
            "texts": [
                {
                    "body": "None",
                    "images": [],
                    "from": "None",
                    "id": "011",
                    "sent": "01-01-2015",
                }
            ]
        }
    ]
    mock = MagicMock(return_value=MockTwilioRestClient())
    with patch.object(twilio_txt_msg, "TwilioRestClient", mock):
        ret = twilio_txt_msg.beacon(config)
    assert ret == _expected_return
298,591 | fix tmpl refs re | # Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
# See COPYING for license information.
import os
import re
from . import config
from . import userdir
from . import log
logger = log.setup_logger(__name__)
def get_var(l, key):
    """Return the value of the first ``<key> <value>`` line in *l* ('' if none)."""
    hits = (fields[1]
            for fields in (line.split() for line in l)
            if len(fields) == 2 and fields[0] == key)
    return next(hits, '')
def chk_var(l, key):
    """True when *l* contains a ``<key> <value>`` line with a non-empty value."""
    def matches(line):
        fields = line.split()
        return len(fields) == 2 and fields[0] == key and bool(fields[1])
    return any(matches(line) for line in l)
def chk_key(l, key):
    """True when any line in *l* starts with the word *key*."""
    return any(line.split()[:1] == [key] for line in l)
def validate_template(l):
    """Test for required stuff in a template.

    A valid template (list of lines) must carry a non-empty '%name',
    a '%generate' marker, and at least one of '%required'/'%optional'
    before the '%generate' marker.  Logs and returns False otherwise.
    """
    if not chk_var(l, '%name'):
        logger.error("invalid template: missing '%name'")
        return False
    if not chk_key(l, '%generate'):
        logger.error("invalid template: missing '%generate'")
        return False
    g = l.index('%generate')
    # Only the pre-generate section may declare parameters.
    if not (chk_key(l[0:g], '%required') or chk_key(l[0:g], '%optional')):
        logger.error("invalid template: missing '%required' or '%optional'")
        return False
    return True
def fix_tmpl_refs(l, ident, pfx):
    """In-place: replace every occurrence of *ident* with *pfx* in each line of *l*."""
    l[:] = [line.replace(ident, pfx) for line in l]
def METHOD_NAME(l, regex, repl):
    """In-place: apply ``re.sub(regex, repl)`` to every line of *l*."""
    l[:] = [re.sub(regex, repl, line) for line in l]
class LoadTemplate(object):
    '''
    Load a template and its dependencies, generate a
    configuration file which should be relatively easy and
    straightforward to parse.
    '''
    edit_instructions = '''# Edit instructions:
#
# Add content only at the end of lines starting with '%%'.
# Only add content, don't remove or replace anything.
# The parameters following '%required' are not optional,
# unlike those following '%optional'.
# You may also add comments for future reference.'''
    no_more_edit = '''# Don't edit anything below this line.'''

    def __init__(self, name):
        self.name = name
        self.all_pre_gen = []
        self.all_post_gen = []
        self.all_pfx = []

    def new_pfx(self, name):
        """Return a prefix unique among loaded templates and register it."""
        i = 1
        pfx = name
        while pfx in self.all_pfx:
            pfx = "%s_%d" % (name, i)
            i += 1
        self.all_pfx.append(pfx)
        return pfx

    def generate(self):
        """Render the combined configuration text for all loaded templates."""
        return '\n'.join(
            ["# Configuration: %s" % self.name,
             '',
             self.edit_instructions,
             '',
             '\n'.join(self.all_pre_gen),
             self.no_more_edit,
             '',
             '%generate',
             '\n'.join(self.all_post_gen)])

    def write_config(self, name):
        """Write the generated configuration to CRMCONF_DIR/<name>.

        Returns True on success, False (after logging) on I/O failure.
        """
        try:
            f = open("%s/%s" % (userdir.CRMCONF_DIR, name), "w")
        except IOError as msg:
            logger.error("open: %s", msg)
            return False
        print(self.generate(), file=f)
        f.close()
        return True

    def load_template(self, tmpl):
        """Load template *tmpl* (and, recursively, its %depends_on templates).

        Returns the unique prefix assigned to the template, or '' on error.
        """
        try:
            l = open(os.path.join(config.path.sharedir, 'templates', tmpl)).read().split('\n')
        except IOError as msg:
            logger.error("open: %s", msg)
            return ''
        if not validate_template(l):
            return ''
        logger.info("pulling in template %s", tmpl)
        g = l.index('%generate')
        pre_gen = l[0:g]
        post_gen = l[g+1:]
        name = get_var(pre_gen, '%name')
        for s in l[0:g]:
            if s.startswith('%depends_on'):
                a = s.split()
                if len(a) != 2:
                    logger.warning("%s: wrong usage", s)
                    continue
                tmpl_id = a[1]
                tmpl_pfx = self.load_template(a[1])
                if tmpl_pfx:
                    fix_tmpl_refs(post_gen, '%'+tmpl_id, '%'+tmpl_pfx)
        pfx = self.new_pfx(name)
        fix_tmpl_refs(post_gen, '%_:', '%'+pfx+':')
        # replace remaining %_, it may be useful at times
        fix_tmpl_refs(post_gen, '%_', pfx)
        # Bug fix: the original expression
        #   pre_gen.index('%required') or pre_gen.index('%optional')
        # broke when '%required' was at index 0 (falsy, so the '%optional'
        # lookup ran and could raise ValueError) or absent entirely.
        # validate_template() only guarantees one of the two markers exists.
        if '%required' in pre_gen:
            v_idx = pre_gen.index('%required')
        else:
            v_idx = pre_gen.index('%optional')
        pre_gen.insert(v_idx, '%pfx ' + pfx)
        self.all_pre_gen += pre_gen
        self.all_post_gen += post_gen
        return pfx

    def post_process(self, params):
        """Substitute user parameters and resolve %if/%else/%fi conditionals."""
        pfx_re = '(%s)' % '|'.join(self.all_pfx)
        for n in params:
            fix_tmpl_refs(self.all_pre_gen, '%% '+n, "%% "+n+" "+params[n])
        METHOD_NAME(self.all_post_gen,
                    '%' + pfx_re + '([^:]|$)', r'\1\2')
        # process %if ... [%else] ... %fi
        rmidx_l = []
        if_seq = False
        outcome = False  # unnecessary, but to appease lints
        for i in range(len(self.all_post_gen)):
            s = self.all_post_gen[i]
            if if_seq:
                a = s.split()
                if len(a) >= 1 and a[0] == '%fi':
                    if_seq = False
                    rmidx_l.append(i)
                elif len(a) >= 1 and a[0] == '%else':
                    outcome = not outcome
                    rmidx_l.append(i)
                else:
                    if not outcome:
                        rmidx_l.append(i)
                continue
            if not s:
                continue
            a = s.split()
            if len(a) == 2 and a[0] == '%if':
                outcome = not a[1].startswith('%')  # not replaced -> false
                if_seq = True
                rmidx_l.append(i)
        # Delete from the end so earlier indices stay valid.
        rmidx_l.reverse()
        for i in rmidx_l:
            del self.all_post_gen[i]
# vim:ts=4:sw=4:et: |
298,592 | type | from datetime import datetime
from sqlalchemy import Column, DateTime, Enum, ForeignKey, String
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship, validates
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.schema import UniqueConstraint
from inbox.logging import get_logger
from inbox.models.base import MailSyncBase
from inbox.models.constants import MAX_INDEXABLE_LENGTH
from inbox.models.mixins import (
CaseInsensitiveComparator,
DeletedAtMixin,
HasPublicID,
HasRevisions,
UpdatedAtMixin,
)
from inbox.sqlalchemy_ext.util import StringWithTransform
from inbox.util.encoding import unicode_safe_truncate
from inbox.util.misc import fs_folder_path
log = get_logger()
EPOCH = datetime.utcfromtimestamp(0)
def sanitize_name(name):
    """Truncate *name* (unicode-safely) to fit in an indexable column."""
    return unicode_safe_truncate(name, MAX_INDEXABLE_LENGTH)
class CategoryNameString(StringWithTransform):
    """
    CategoryNameString is a Column type that extends our
    sqlalchemy_ext.util.StringWithTransform to initialize it with the correct
    sanitization procedure and the correct string length and collation we use
    for category names.
    We store rstripped and truncated category names, so this class will
    ensure that all strings of this type are correctly truncated and sanitized,
    and the input of any `==` queries executed against a Column of this
    type match the values that we are actually storing in the database.
    """

    def __init__(self, *args, **kwargs):
        # NOTE(review): *args/**kwargs are accepted but deliberately not
        # forwarded -- the column is always built with the fixed sanitizer,
        # length and collation below.  Confirm no caller relies on passing
        # overrides through.
        super().__init__(sanitize_name, MAX_INDEXABLE_LENGTH, collation="utf8mb4_bin")
class Category(MailSyncBase, HasRevisions, HasPublicID, UpdatedAtMixin, DeletedAtMixin):
    """A folder or label within a namespace, keyed by display_name."""

    @property
    def API_OBJECT_NAME(self):
        # Categories surface through the API as either "folder" or "label".
        return self.type_

    # Override the default `deleted_at` column with one that is NOT NULL --
    # Category.deleted_at is needed in a UniqueConstraint.
    # Set the default Category.deleted_at = EPOCH instead.
    deleted_at = Column(
        DateTime, index=True, nullable=False, default="1970-01-01 00:00:00"
    )

    # Need `use_alter` here to avoid circular dependencies
    namespace_id = Column(
        ForeignKey(
            "namespace.id", use_alter=True, name="category_fk1", ondelete="CASCADE"
        ),
        nullable=False,
    )
    namespace = relationship("Namespace", load_on_pending=True)

    # STOPSHIP(emfree): need to index properly for API filtering performance.
    name = Column(String(MAX_INDEXABLE_LENGTH), nullable=False, default="")
    display_name = Column(CategoryNameString(), nullable=False)
    type_ = Column(Enum("folder", "label"), nullable=False, default="folder")

    @validates("display_name")
    def validate_display_name(self, key, display_name):
        """Truncate overly long display names before they hit the DB."""
        sanitized_name = sanitize_name(display_name)
        if sanitized_name != display_name:
            log.warning(
                "Truncating category display_name",
                type_=self.type_,
                original=display_name,
            )
        return sanitized_name

    @classmethod
    def find_or_create(cls, session, namespace_id, name, display_name, type_):
        """Return the category for (namespace_id, display_name), creating it
        when missing.  Raises MultipleResultsFound on duplicate rows."""
        name = name or ""
        objects = (
            session.query(cls)
            .filter(cls.namespace_id == namespace_id, cls.display_name == display_name)
            .all()
        )
        if not objects:
            obj = cls(
                namespace_id=namespace_id,
                name=name,
                display_name=display_name,
                type_=type_,
                deleted_at=EPOCH,
            )
            session.add(obj)
        elif len(objects) == 1:
            obj = objects[0]
            if not obj.name:
                # There is an existing category with this `display_name` and no
                # `name`, so update it's `name` as needed.
                # This is needed because the first time we sync generic IMAP
                # folders, they may initially have `name` == '' but later they may
                # get a `name`. At this point, it *is* the same folder so we
                # merely want to update its `name`, not create a new one.
                obj.name = name
        else:
            log.error(
                "Duplicate category rows for namespace_id {}, "
                "name {}, display_name: {}".format(namespace_id, name, display_name)
            )
            raise MultipleResultsFound(
                "Duplicate category rows for namespace_id {}, name {}, "
                "display_name: {}".format(namespace_id, name, display_name)
            )
        return obj

    @classmethod
    def create(cls, session, namespace_id, name, display_name, type_):
        """Unconditionally create and add a new category row."""
        name = name or ""
        obj = cls(
            namespace_id=namespace_id,
            name=name,
            display_name=display_name,
            type_=type_,
            deleted_at=EPOCH,
        )
        session.add(obj)
        return obj

    @property
    def account(self):
        return self.namespace.account

    @property
    def METHOD_NAME(self):
        # Delegates to the owning account's category type ("folder"/"label").
        return self.account.category_type

    @hybrid_property
    def lowercase_name(self):
        return self.display_name.lower()

    @lowercase_name.comparator
    def lowercase_name(cls):
        # Enables case-insensitive SQL comparisons against display_name.
        return CaseInsensitiveComparator(cls.display_name)

    @property
    def api_display_name(self):
        """display_name with provider-specific prefixes/separators stripped."""
        if self.namespace.account.provider == "gmail":
            if self.display_name.startswith("[Gmail]/"):
                return self.display_name[8:]
            elif self.display_name.startswith("[Google Mail]/"):
                return self.display_name[14:]
        if self.namespace.account.provider not in ["gmail", "microsoft"]:
            return fs_folder_path(
                self.display_name,
                separator=self.namespace.account.folder_separator,
                prefix=self.namespace.account.folder_prefix,
            )
        return self.display_name

    @property
    def is_deleted(self):
        # deleted_at == EPOCH means "live"; anything later means soft-deleted.
        return self.deleted_at > EPOCH

    __table_args__ = (
        UniqueConstraint("namespace_id", "name", "display_name", "deleted_at"),
        UniqueConstraint("namespace_id", "public_id"),
    )
298,593 | clear | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2022 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import sys
import wx
from .. import utils
from . import functions
import re
from pathlib import Path
from psychopy.localization import _translate
from ...tools.stringtools import valid_proj_name
class SyncDialog(wx.Dialog):
    """Dialog showing live progress while a Pavlovia project syncs."""

    def __init__(self, parent, project):
        wx.Dialog.__init__(self, parent, title=_translate("Syncing project..."))
        self.project = project
        # Setup sizer
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(self.sizer)
        # Create status panel
        self.status = InfoStream(self, id=wx.ID_ANY, size=(-1, -1),
                                 value=_translate("Synchronising..."),
                                 style=wx.TE_READONLY | wx.TE_MULTILINE)
        self.sizer.Add(self.status, border=6, proportion=1, flag=wx.ALL | wx.EXPAND)
        # Setup button sizer
        self.btnSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.sizer.Add(self.btnSizer, border=6, flag=wx.ALL | wx.EXPAND)
        self.btnSizer.AddStretchSpacer(1)
        # Add buttons (OK disabled until the sync finishes)
        self.OKbtn = wx.Button(self, label=_translate("OK"), id=wx.ID_OK)
        self.OKbtn.Disable()
        self.btnSizer.Add(self.OKbtn, border=3, flag=wx.LEFT | wx.ALIGN_CENTER_VERTICAL)
        # Layout
        self.Layout()
        self.Show()

    def sync(self):
        """Log in if needed, run the project sync, then enable OK."""
        # If there's no user yet, login
        if self.project.session.user is None:
            functions.logInPavlovia(self)
        # Do sync (the status control doubles as a file-like progress sink)
        self.project.sync(self.status)
        self.OKbtn.Enable()
class InfoStream(wx.TextCtrl):
    """Read-only text control acting as a file-like sink for sync progress."""

    def __init__(self, parent, id, size,
                 value="Synchronising...",
                 style=wx.TE_READONLY | wx.TE_MULTILINE):
        wx.TextCtrl.__init__(self, parent, id,
                             size=size, value=value, style=style)

    def METHOD_NAME(self):
        """Empty the control."""
        self.SetValue("")

    def write(self, text):
        """Append *text* (str or utf-8 bytes) after sanitizing it.

        Part of the file-like interface expected by project.sync().
        """
        # isinstance instead of `type(text) == bytes`: idiomatic and also
        # handles bytes subclasses.
        if isinstance(text, bytes):
            text = text.decode('utf-8')
        # Sanitize text (remove sensitive info like oauth keys)
        text = utils.sanitize(text)
        # Show
        self.SetValue(self.GetValue() + text)
        wx.Yield()
class CreateDlg(wx.Dialog):
# List of folders which are invalid paths for a pavlovia project
invalidFolders = [Path.home() / 'Desktop',
Path.home() / 'My Documents',
Path.home() / 'Documents']
def __init__(self, parent, user, name="", path=""):
wx.Dialog.__init__(self, parent=parent,
title=_translate("New project..."),
size=(500, 200), style=wx.DEFAULT_DIALOG_STYLE | wx.CLOSE_BOX)
# If there's no user yet, login
if user is None:
user = functions.logInPavlovia(self)
self.user = user
self.session = parent.session
self.project = None
# Setup sizer
self.frame = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self.frame)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.frame.Add(self.sizer, border=6, proportion=1, flag=wx.ALL | wx.EXPAND)
# Name label
self.nameLbl = wx.StaticText(self, label=_translate("Project name:"))
self.sizer.Add(self.nameLbl, border=3, flag=wx.ALL | wx.EXPAND)
# Name ctrls
self.nameSizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer.Add(self.nameSizer, border=3, flag=wx.ALL | wx.EXPAND)
# URL prefix
self.nameRootLbl = wx.StaticText(self, label="pavlovia.org /")
self.nameSizer.Add(self.nameRootLbl, border=3, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
# Namespace ctrl
self.namespaceCtrl = wx.Choice(self, choices=[user['username']] + user.session.listUserGroups(namesOnly=True))
self.namespaceCtrl.SetStringSelection(user['username'])
self.nameSizer.Add(self.namespaceCtrl, border=3, flag=wx.ALL | wx.EXPAND)
# Slash
self.slashLbl = wx.StaticText(self, label="/")
self.nameSizer.Add(self.slashLbl, border=3, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
# Name ctrl
self.nameCtrl = wx.TextCtrl(self, value=str(name))
self.nameCtrl.Bind(wx.EVT_TEXT, self.validate)
self.nameSizer.Add(self.nameCtrl, border=3, proportion=1, flag=wx.ALL | wx.EXPAND)
# Local root label
self.rootLbl = wx.StaticText(self, label=_translate("Project folder:"))
self.sizer.Add(self.rootLbl, border=3, flag=wx.ALL | wx.EXPAND)
# Local root ctrl
self.rootCtrl = utils.FileCtrl(self, value=str(path), dlgtype="dir")
self.rootCtrl.Bind(wx.EVT_FILEPICKER_CHANGED, self.validate)
self.sizer.Add(self.rootCtrl, border=3, flag=wx.ALL | wx.EXPAND)
# Add dlg buttons
self.sizer.AddStretchSpacer(1)
self.btnSizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer.Add(self.btnSizer, border=3, flag=wx.ALL | wx.EXPAND)
self.btnSizer.AddStretchSpacer(1)
# OK button
self.OKbtn = wx.Button(self, id=wx.ID_OK, label=_translate("OK"))
self.OKbtn.Bind(wx.EVT_BUTTON, self.submit)
# CANCEL button
self.CANCELbtn = wx.Button(self, id=wx.ID_CANCEL, label=_translate("Cancel"))
# Add dlg buttons in OS appropriate order
if sys.platform == "win32":
btns = [self.OKbtn, self.CANCELbtn]
else:
btns = [self.CANCELbtn, self.OKbtn]
self.btnSizer.Add(btns[0], border=3, flag=wx.ALL)
self.btnSizer.Add(btns[1], border=3, flag=wx.ALL)
self.Layout()
self.validate()
def validate(self, evt=None):
# Test name
name = self.nameCtrl.GetValue()
nameValid = bool(valid_proj_name.fullmatch(name))
# Test path
path = Path(self.rootCtrl.GetValue())
pathValid = path.is_dir() and path not in self.invalidFolders
# Combine
valid = nameValid and pathValid
# Enable/disable Okay button
self.OKbtn.Enable(valid)
return valid
def submit(self, evt=None):
self.project = self.session.createProject(**self.GetValue())
if self.project is not None:
self.project.refresh()
evt.Skip()
def GetValue(self):
return {
"name": self.nameCtrl.GetValue(),
"localRoot": self.rootCtrl.GetValue(),
"namespace": self.namespaceCtrl.GetStringSelection()
|
298,594 | service reset | """Agent manager."""
import asyncio
from collections import defaultdict
from typing import Any, Dict, List, Mapping, MutableMapping, MutableSet, Set
from weakref import WeakSet
from mode import Service
from mode.utils.collections import ManagedUserDict
from mode.utils.compat import OrderedDict
from mode.utils.locks import Event
from faust.types import AgentManagerT, AgentT, AppT
from faust.types.tuples import TP, tp_set_to_map
from faust.utils.tracing import traced_from_parent_span
TRACEBACK_HEADER = """
=======================================
TRACEBACK OF ALL RUNNING AGENT ACTORS
=======================================
"""
TRACEBACK_FORMAT = """
* {name} ----->
============================================================
{traceback}
"""
TRACEBACK_FOOTER = """
-eof tracebacks- :-)
"""
class AgentManager(Service, AgentManagerT, ManagedUserDict):
    """Agent manager.

    Tracks every agent defined on the app, drives them through the
    service lifecycle, and maintains a topic-name -> agents index used
    during rebalancing.
    """

    traceback_header: str = TRACEBACK_HEADER
    traceback_format: str = TRACEBACK_FORMAT
    traceback_footer: str = TRACEBACK_FOOTER

    _by_topic: MutableMapping[str, MutableSet[AgentT]]

    def __init__(self, app: AppT, **kwargs: Any) -> None:
        self.app = app
        self.data = OrderedDict()
        self._by_topic = defaultdict(WeakSet)
        self._agents_started = Event()
        Service.__init__(self, **kwargs)

    def __hash__(self) -> int:
        # Identity hash: the manager is a dict subclass but must stay hashable.
        return object.__hash__(self)

    async def on_start(self) -> None:
        """Call when agents are being started."""
        self.update_topic_index()
        for agent in self.values():
            await agent.maybe_start()
        self._agents_started.set()

    def actor_tracebacks(self) -> Mapping[str, List[str]]:
        """Return tracebacks for the actors of every agent, keyed by name."""
        return {name: agent.actor_tracebacks() for name, agent in self.items()}

    def human_tracebacks(self) -> str:
        """Return a human-readable report of all running actor tracebacks."""
        return "\n".join(
            [
                self.traceback_header,
                "\n".join(
                    self.traceback_format.format(
                        name=name,
                        traceback=traceback,
                    )
                    for name, traceback in self.actor_tracebacks().items()
                ),
                self.traceback_footer,
            ]
        )

    async def wait_until_agents_started(self) -> None:
        """Block until all agents are up (no-op for producer/client-only apps)."""
        if not self.app.producer_only and not self.app.client_only:
            await self.wait_for_stopped(self._agents_started)

    def METHOD_NAME(self) -> None:
        """Reset service state on restart."""
        # Plain loop instead of a list comprehension used only for side
        # effects (the built list was discarded).
        for agent in self.values():
            agent.METHOD_NAME()
        super().METHOD_NAME()

    async def on_stop(self) -> None:
        """Call when agents are being stopped."""
        for agent in self.values():
            try:
                # shield() so a cancelled manager still lets each agent
                # finish its own stop sequence.
                await asyncio.shield(agent.stop())
            except asyncio.CancelledError:
                pass

    async def stop(self) -> None:
        """Stop all running agents."""
        # Cancel first so _execute_actor sees we are not stopped.
        self.cancel()
        # Then stop the agents
        await super().stop()

    def cancel(self) -> None:
        """Cancel all running agents."""
        # Plain loop instead of a side-effect-only list comprehension.
        for agent in self.values():
            agent.cancel()

    def update_topic_index(self) -> None:
        """Update indices."""
        # keep mapping from topic name to set of agents.
        by_topic_index = self._by_topic
        for agent in self.values():
            for topic in agent.get_topic_names():
                by_topic_index[topic].add(agent)

    async def on_rebalance(self, revoked: Set[TP], newly_assigned: Set[TP]) -> None:
        """Call when a rebalance is needed."""
        T = traced_from_parent_span()
        # for isolated_partitions agents we stop agents for revoked
        # partitions.
        for agent, tps in self._collect_agents_for_update(revoked).items():
            await T(agent.on_partitions_revoked)(tps)
        # for isolated_partitions agents we start agents for newly
        # assigned partitions
        for agent, tps in T(self._collect_agents_for_update)(newly_assigned).items():
            await T(agent.on_partitions_assigned)(tps)

    def _collect_agents_for_update(self, tps: Set[TP]) -> Dict[AgentT, Set[TP]]:
        """Group *tps* by the agents subscribed to each topic."""
        by_agent: Dict[AgentT, Set[TP]] = defaultdict(set)
        for topic, tps_of_topic in tp_set_to_map(tps).items():
            for agent in self._by_topic[topic]:
                by_agent[agent].update(tps_of_topic)
        return by_agent
298,595 | test no var pred | import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
import pytest
import statsmodels.datasets
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.base.prediction import PredictionResults
from statsmodels.tsa.deterministic import Fourier
from statsmodels.tsa.exponential_smoothing.ets import ETSModel
from statsmodels.tsa.forecasting.stl import STLForecast
from statsmodels.tsa.seasonal import STL, DecomposeResult
from statsmodels.tsa.statespace.exponential_smoothing import (
ExponentialSmoothing,
)
@pytest.fixture(scope="module")
def data(request):
    """Monthly AR(1) series (phi=0.9) with an annual Fourier seasonal term."""
    rs = np.random.RandomState(987654321)
    err = rs.standard_normal(500)
    index = pd.date_range("1980-1-1", freq="M", periods=500)
    fourier = Fourier(12, 1)
    terms = fourier.in_sample(index)
    # Fixed weights (2, 1) on the two Fourier columns form the deterministic part.
    det = np.squeeze(np.asarray(terms @ np.array([[2], [1]])))
    for i in range(1, 500):
        err[i] += 0.9 * err[i - 1] + det[i]
    return pd.Series(err, index=index)
def test_smoke(data):
    """Fit/forecast/summary must run and expose the expected attributes."""
    stlf = STLForecast(data, ARIMA, model_kwargs={"order": (2, 0, 0)})
    res = stlf.fit(fit_kwargs={})
    res.forecast(37)
    assert isinstance(res.summary().as_text(), str)
    assert isinstance(res.stl, STL)
    assert isinstance(res.result, DecomposeResult)
    assert isinstance(res.model, ARIMA)
    assert hasattr(res.model_result, "forecast")
@pytest.mark.matplotlib
def test_sharex(data):
    """All four decomposition subplots should share a common x axis."""
    stlf = STLForecast(data, ARIMA, model_kwargs={"order": (2, 0, 0)})
    res = stlf.fit(fit_kwargs={})
    plt = res.result.plot()
    grouper_view = plt.axes[0].get_shared_x_axes()
    sibs = grouper_view.get_siblings(plt.axes[1])
    assert len(sibs) == 4
MODELS = [
    (ARIMA, {"order": (2, 0, 0), "trend": "c"}),
    (ExponentialSmoothing, {"trend": True}),
    (AutoReg, {"lags": 2, "old_names": False}),
    (ETSModel, {}),
]
# NOTE(review): this truncation keeps only ETSModel and silently skips the
# three models above -- looks like a debugging leftover; confirm it is
# intentional before relying on the parametrized coverage.
MODELS = MODELS[-1:]
IDS = [str(c[0]).split(".")[-1][:-2] for c in MODELS]
@pytest.mark.parametrize("config", MODELS, ids=IDS)
@pytest.mark.parametrize("horizon", [1, 7, 23])
def test_equivalence_forecast(data, config, horizon):
    """STLForecast must match manual deseasonalize -> model -> reseasonalize."""
    model, kwargs = config
    stl = STL(data)
    stl_fit = stl.fit()
    resids = data - stl_fit.seasonal
    mod = model(resids, **kwargs)
    fit_kwarg = {}
    if model is ETSModel:
        fit_kwarg["disp"] = False
    res = mod.fit(**fit_kwarg)
    stlf = STLForecast(data, model, model_kwargs=kwargs).fit(
        fit_kwargs=fit_kwarg
    )
    # Re-apply the last seasonal cycle, tiled to cover the horizon.
    seasonal = np.asarray(stl_fit.seasonal)[-12:]
    seasonal = np.tile(seasonal, 1 + horizon // 12)
    fcast = res.forecast(horizon) + seasonal[:horizon]
    actual = stlf.forecast(horizon)
    assert_allclose(actual, fcast, rtol=1e-4)
    if not hasattr(res, "get_prediction"):
        return
    pred = stlf.get_prediction(data.shape[0], data.shape[0] + horizon - 1)
    assert isinstance(pred, PredictionResults)
    assert_allclose(pred.predicted_mean, fcast, rtol=1e-4)
    half = data.shape[0] // 2
    # Smoke checks: these dynamic specifications must not raise.
    stlf.get_prediction(half, data.shape[0] + horizon - 1)
    stlf.get_prediction(half, data.shape[0] + horizon - 1, dynamic=True)
    stlf.get_prediction(half, data.shape[0] + horizon - 1, dynamic=half // 2)
    if hasattr(data, "index"):
        loc = data.index[half + half // 2]
        # `dynamic` accepts a date string, a datetime and a Timestamp alike.
        a = stlf.get_prediction(
            half, data.shape[0] + horizon - 1, dynamic=loc.strftime("%Y-%m-%d")
        )
        b = stlf.get_prediction(
            half, data.shape[0] + horizon - 1, dynamic=loc.to_pydatetime()
        )
        c = stlf.get_prediction(half, data.shape[0] + horizon - 1, dynamic=loc)
        assert_allclose(a.predicted_mean, b.predicted_mean, rtol=1e-4)
        assert_allclose(a.predicted_mean, c.predicted_mean, rtol=1e-4)
def test_exceptions(data):
    """STLForecast must reject models/results missing the required interface."""
    class BadModel:
        # No fit() at all.
        def __init__(self, *args, **kwargs):
            pass

    with pytest.raises(AttributeError, match="model must expose"):
        STLForecast(data, BadModel)

    class NoForecast(BadModel):
        # fit() returns a result with no forecast().
        def fit(self, *args, **kwargs):
            return BadModel()

    with pytest.raises(AttributeError, match="The model's result"):
        STLForecast(data, NoForecast).fit()

    class BadResult:
        # forecast() exists but summary() does not.
        def forecast(self, *args, **kwargs):
            pass

    class FakeModel(BadModel):
        def fit(self, *args, **kwargs):
            return BadResult()

    with pytest.raises(AttributeError, match="The model result does not"):
        STLForecast(data, FakeModel).fit().summary()

    class BadResultSummary(BadResult):
        # summary() exists but returns something without as_text().
        def summary(self, *args, **kwargs):
            return object()

    class FakeModelSummary(BadModel):
        def fit(self, *args, **kwargs):
            return BadResultSummary()

    with pytest.raises(TypeError, match="The model result's summary"):
        STLForecast(data, FakeModelSummary).fit().summary()
@pytest.fixture(scope="function")
def sunspots():
    """Sunspot activity series with a plain integer index."""
    df = statsmodels.datasets.sunspots.load_pandas().data
    df.index = np.arange(df.shape[0])
    return df.iloc[:, 0]
def test_get_prediction(sunspots):
    # GH7309: get_prediction must produce mean and variance of matching length.
    stlf_model = STLForecast(
        sunspots, model=ARIMA, model_kwargs={"order": (2, 2, 0)}, period=11
    )
    stlf_res = stlf_model.fit()
    pred = stlf_res.get_prediction()
    assert pred.predicted_mean.shape == (309,)
    assert pred.var_pred_mean.shape == (309,)
@pytest.mark.parametrize("not_implemented", [True, False])
def METHOD_NAME(sunspots, not_implemented):
    """When the wrapped result cannot supply forecast variances, STLForecast
    must warn and fill var_pred_mean with NaN instead of failing."""
    class DummyPred:
        # Prediction object exposing only the mean (no variance attributes).
        def __init__(self, predicted_mean, row_labels):
            self.predicted_mean = predicted_mean
            self.row_labels = row_labels

            def f():
                raise NotImplementedError

            if not_implemented:
                self.forecast = property(f)

    class DummyRes:
        # Result wrapper that strips variance info from get_prediction().
        def __init__(self, res):
            self._res = res

        def forecast(self, *args, **kwargs):
            return self._res.forecast(*args, **kwargs)

        def get_prediction(self, *args, **kwargs):
            pred = self._res.get_prediction(*args, **kwargs)
            return DummyPred(pred.predicted_mean, pred.row_labels)

    class DummyMod:
        def __init__(self, y):
            self._mod = ARIMA(y)

        def fit(self, *args, **kwargs):
            res = self._mod.fit(*args, **kwargs)
            return DummyRes(res)

    stl_mod = STLForecast(sunspots, model=DummyMod, period=11)
    stl_res = stl_mod.fit()
    with pytest.warns(UserWarning, match="The variance of"):
        pred = stl_res.get_prediction()
    assert np.all(np.isnan(pred.var_pred_mean))
298,596 | index code | # cython: language_level=3
from __future__ import absolute_import
from .PyrexTypes import CType, CTypedefType, CStructOrUnionType
import cython
try:
import pythran
pythran_is_pre_0_9 = tuple(map(int, pythran.__version__.split('.')[0:2])) < (0, 9)
pythran_is_pre_0_9_6 = tuple(map(int, pythran.__version__.split('.')[0:3])) < (0, 9, 6)
except ImportError:
pythran = None
pythran_is_pre_0_9 = True
pythran_is_pre_0_9_6 = True
if pythran_is_pre_0_9_6:
pythran_builtins = '__builtin__'
else:
pythran_builtins = 'builtins'
# Pythran/Numpy specific operations
def has_np_pythran(env):
    """Report whether *env* enables the 'np_pythran' compiler directive."""
    if env is None:
        return False
    directives = getattr(env, 'directives', None)
    # Propagate a falsy `directives` value unchanged, mirroring the
    # short-circuit `and` in the original expression.
    if not directives:
        return directives
    return directives.get('np_pythran', False)
@cython.ccall
def is_pythran_supported_dtype(type_):
    """True when a buffer dtype can be lowered to Pythran (numeric scalars)."""
    if isinstance(type_, CTypedefType):
        # Typedefs are judged by what they alias.
        return is_pythran_supported_type(type_.typedef_base_type)
    return type_.is_numeric
def pythran_type(Ty, ptype="ndarray"):
    """Return the Pythran/C++ spelling of the Cython type *Ty*.

    Buffer types become ``pythonic::types::<ptype>``; numeric scalars use
    their C sign-and-name spelling.  Raises ValueError for unsupported types.
    """
    if Ty.is_buffer:
        ndim,dtype = Ty.ndim, Ty.dtype
        if isinstance(dtype, CStructOrUnionType):
            ctype = dtype.cname
        elif isinstance(dtype, CType):
            ctype = dtype.sign_and_name()
        elif isinstance(dtype, CTypedefType):
            ctype = dtype.typedef_cname
        else:
            raise ValueError("unsupported type %s!" % dtype)
        # Pythran 0.9 changed how array shapes are encoded (pshape<...>).
        if pythran_is_pre_0_9:
            return "pythonic::types::%s<%s,%d>" % (ptype,ctype, ndim)
        else:
            return "pythonic::types::%s<%s,pythonic::types::pshape<%s>>" % (ptype,ctype, ",".join(("long",)*ndim))
    if Ty.is_pythran_expr:
        return Ty.pythran_type
    #if Ty.is_none:
    #    return "decltype(pythonic::builtins::None)"
    if Ty.is_numeric:
        return Ty.sign_and_name()
    raise ValueError("unsupported pythran type %s (%s)" % (Ty, type(Ty)))
@cython.cfunc
def type_remove_ref(ty):
    """Wrap *ty* in std::remove_reference to strip C++ reference qualifiers."""
    return "typename std::remove_reference<%s>::type" % ty
def pythran_binop_type(op, tA, tB):
    """Return a C++ decltype expression for the result of ``tA op tB``."""
    if op == '**':
        # ** has no C++ operator; defer to pythran's numpy power functor.
        return 'decltype(pythonic::numpy::functor::power{}(std::declval<%s>(), std::declval<%s>()))' % (
            pythran_type(tA), pythran_type(tB))
    else:
        return "decltype(std::declval<%s>() %s std::declval<%s>())" % (
            pythran_type(tA), op, pythran_type(tB))
def pythran_unaryop_type(op, type_):
    """Return a C++ decltype expression for the result of unary ``op type_``."""
    return "decltype(%sstd::declval<%s>())" % (
        op, pythran_type(type_))
@cython.cfunc
def _index_access(index_code, indices):
    """Render an indexing suffix: ``[i]`` for one index, ``(i,j,...)`` for many."""
    indexing = ",".join([index_code(idx) for idx in indices])
    return ('[%s]' if len(indices) == 1 else '(%s)') % indexing
def _index_type_code(index_with_type):
    """Return a C++ ``declval``-style expression for one (node, type) index pair.

    Used only for type deduction, so slices just need the right arity.
    """
    idx, index_type = index_with_type
    if idx.is_slice:
        # 2 args for start/stop, 3 when a step is present.
        n = 2 + int(not idx.step.is_none)
        return "pythonic::%s::functor::slice{}(%s)" % (
            pythran_builtins,
            ",".join(["0"]*n))
    elif index_type.is_int:
        return "std::declval<%s>()" % index_type.sign_and_name()
    elif index_type.is_pythran_expr:
        return "std::declval<%s>()" % index_type.pythran_type
    raise ValueError("unsupported indexing type %s!" % index_type)
def METHOD_NAME(idx):
    """Return C++ code evaluating a single index expression node *idx*.

    Unlike _index_type_code this emits real runtime values, not declvals.
    """
    if idx.is_slice:
        values = idx.start, idx.stop, idx.step
        if idx.step.is_none:
            # No step -> pythran's cheaper contiguous_slice(start, stop).
            func = "contiguous_slice"
            values = values[:2]
        else:
            func = "slice"
        return "pythonic::types::%s(%s)" % (
            func, ",".join((v.pythran_result() for v in values)))
    elif idx.type.is_int:
        return to_pythran(idx)
    elif idx.type.is_pythran_expr:
        return idx.pythran_result()
    raise ValueError("unsupported indexing type %s" % idx.type)
def pythran_indexing_type(type_, indices):
    """Return the (dereferenced) C++ type of ``type_[indices...]``."""
    return type_remove_ref("decltype(std::declval<%s>()%s)" % (
        pythran_type(type_),
        _index_access(_index_type_code, indices),
    ))
def pythran_indexing_code(indices):
    """Return runtime C++ indexing code (``[...]`` or ``(...)``) for *indices*."""
    return _index_access(METHOD_NAME, indices)
def np_func_to_list(func):
    """Return the numpy attribute path of *func* as a list of names.

    E.g. a node for ``np.linalg.norm`` yields ``['linalg', 'norm']``;
    a non-numpy node yields ``[]``.
    """
    path = []
    node = func
    while node.is_numpy_attribute:
        path.append(node.attribute)
        node = node.obj
    path.reverse()
    return path
if pythran is None:
    def pythran_is_numpy_func_supported(name):
        # Without pythran installed, no numpy function can be lowered.
        return False
else:
    def pythran_is_numpy_func_supported(func):
        """True when every attribute step of *func* exists in pythran's numpy table."""
        CurF = pythran.tables.MODULES['numpy']
        FL = np_func_to_list(func)
        for F in FL:
            CurF = CurF.get(F, None)
            if CurF is None:
                return False
        return True
def pythran_functor(func):
    """Return the C++ functor path for a numpy attribute node.

    E.g. ``np.linalg.norm`` -> ``pythonic::numpy::linalg::functor::norm``.
    """
    func = np_func_to_list(func)
    submodules = "::".join(func[:-1] + ["functor"])
    return "pythonic::numpy::%s::%s" % (submodules, func[-1])
def pythran_func_type(func, args):
    """Return a decltype expression for calling the pythran functor of *func*
    with arguments of the given Cython types."""
    args = ",".join(("std::declval<%s>()" % pythran_type(a.type) for a in args))
    return "decltype(%s{}(%s))" % (pythran_functor(func), args)
@cython.ccall
def to_pythran(op, ptype=None):
    """Return C++ code converting expression node *op* into a Pythran value.

    Python objects go through ``from_python<T>``; native values pass through.
    """
    op_type = op.type
    if op_type.is_int:
        # Make sure that integer literals always have exactly the type that the templates expect.
        return op_type.cast_code(op.result())
    if is_type(op_type, ["is_pythran_expr", "is_numeric", "is_float", "is_complex"]):
        return op.result()
    if op.is_none:
        return "pythonic::%s::None" % pythran_builtins
    if ptype is None:
        ptype = pythran_type(op_type)
    # Everything remaining must be a Python object needing conversion.
    assert op.type.is_pyobject
    return "from_python<%s>(%s)" % (ptype, op.py_result())
@cython.cfunc
def is_type(type_, types):
    """Return True when ``type_`` has any of the given boolean attributes set."""
    return any(getattr(type_, attr, False) for attr in types)
def is_pythran_supported_node_or_none(node):
    """A node is usable by pythran when it is None or its type is supported."""
    supported = node.is_none or is_pythran_supported_type(node.type)
    return supported
@cython.ccall
def is_pythran_supported_type(type_):
    """True for types pythran can represent (scalars, None, expressions)."""
    flags = (
        "is_pythran_expr", "is_int", "is_numeric", "is_float", "is_none", "is_complex")
    if is_type(type_, flags):
        return True
    return is_pythran_expr(type_)
def is_pythran_supported_operation_type(type_):
    """True for operand types usable in a pythran operation (None excluded)."""
    flags = (
        "is_pythran_expr", "is_int", "is_numeric", "is_float", "is_complex")
    if is_type(type_, flags):
        return True
    return is_pythran_expr(type_)
@cython.ccall
def is_pythran_expr(type_):
    # True for types that represent a pythran expression template.
    return type_.is_pythran_expr
def is_pythran_buffer(type_):
    """True when ``type_`` is a numpy buffer pythran can consume directly
    (supported dtype, c/strided layout, no cast)."""
    if not type_.is_numpy_buffer:
        return False
    if not is_pythran_supported_dtype(type_.dtype):
        return False
    return type_.mode in ("c", "strided") and not type_.cast
def pythran_get_func_include_file(func):
    """Header path under pythonic/numpy providing the given numpy function."""
    parts = np_func_to_list(func)
    return "pythonic/numpy/%s.hpp" % "/".join(parts)
def include_pythran_generic(env):
    """Register the pythran headers every generated module needs."""
    # Core runtime and basic container support.
    headers = [
        "pythonic/core.hpp",
        "pythonic/python/core.hpp",
        "pythonic/types/bool.hpp",
        "pythonic/types/ndarray.hpp",
        "pythonic/numpy/power.hpp",
        "pythonic/%s/slice.hpp" % pythran_builtins,
        "<new>",  # for placement new
    ]
    # Fixed-width integer types, unsigned then signed for each width.
    for width in (8, 16, 32, 64):
        headers.append("pythonic/types/uint%d.hpp" % width)
        headers.append("pythonic/types/int%d.hpp" % width)
    # Remaining scalar and container types.
    for type_name in ("float", "float32", "float64", "set", "slice", "tuple",
                      "int", "complex", "complex64", "complex128"):
        headers.append("pythonic/types/%s.hpp" % type_name)
    for header in headers:
        env.add_include_file(header)
298,597 | estimate pointcloud local coord frames | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple, TYPE_CHECKING, Union
import torch
from pytorch3d.common.workaround import symeig3x3
from .utils import convert_pointclouds_to_tensor, get_point_covariances
if TYPE_CHECKING:
from ..structures import Pointclouds
def estimate_pointcloud_normals(
    pointclouds: Union[torch.Tensor, "Pointclouds"],
    neighborhood_size: int = 50,
    disambiguate_directions: bool = True,
    *,
    use_symeig_workaround: bool = True,
) -> torch.Tensor:
    """
    Estimates the normals of a batch of `pointclouds`.
    This is a thin wrapper around `estimate_pointcloud_local_coord_frames`,
    which does the actual estimation; see that function for details.
    Args:
        **pointclouds**: Batch of 3-dimensional points of shape
            `(minibatch, num_point, 3)` or a `Pointclouds` object.
        **neighborhood_size**: The size of the neighborhood used to estimate
            the geometry around each point.
        **disambiguate_directions**: If `True`, uses the algorithm from [1] to
            ensure sign consistency of the normals of neighboring points.
        **use_symeig_workaround**: If `True`, uses a custom eigenvalue
            calculation.
    Returns:
        **normals**: A tensor of normals for each input point
            of shape `(minibatch, num_point, 3)`.
            If `pointclouds` are of `Pointclouds` class, returns a padded
            tensor.
    References:
        [1] Tombari, Salti, Di Stefano: Unique Signatures of Histograms for
        Local Surface Description, ECCV 2010.
    """
    _, frames = METHOD_NAME(
        pointclouds,
        neighborhood_size=neighborhood_size,
        disambiguate_directions=disambiguate_directions,
        use_symeig_workaround=use_symeig_workaround,
    )
    # The first column of every local coordinate frame is the normal.
    return frames[:, :, :, 0]
def METHOD_NAME(
    pointclouds: Union[torch.Tensor, "Pointclouds"],
    neighborhood_size: int = 50,
    disambiguate_directions: bool = True,
    *,
    use_symeig_workaround: bool = True,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Estimates the principal directions of curvature (which includes normals)
    of a batch of `pointclouds`.
    The algorithm first finds `neighborhood_size` nearest neighbors for each
    point of the point clouds, followed by obtaining principal vectors of
    covariance matrices of each of the point neighborhoods.
    The main principal vector corresponds to the normals, while the
    other 2 are the direction of the highest curvature and the 2nd highest
    curvature.
    Note that each principal direction is given up to a sign. Hence,
    the function implements `disambiguate_directions` switch that allows
    to ensure consistency of the sign of neighboring normals. The
    implementation follows the sign disambiguation from SHOT descriptors [1].
    The algorithm also returns the curvature values themselves.
    These are the eigenvalues of the estimated covariance matrices
    of each point neighborhood.
    Args:
        **pointclouds**: Batch of 3-dimensional points of shape
            `(minibatch, num_point, 3)` or a `Pointclouds` object.
        **neighborhood_size**: The size of the neighborhood used to estimate
            the geometry around each point.
        **disambiguate_directions**: If `True`, uses the algorithm from [1] to
            ensure sign consistency of the normals of neighboring points.
        **use_symeig_workaround**: If `True`, uses a custom eigenvalue
            calculation.
    Returns:
        **curvatures**: The three principal curvatures of each point
            of shape `(minibatch, num_point, 3)`.
            If `pointclouds` are of `Pointclouds` class, returns a padded
            tensor.
        **local_coord_frames**: The three principal directions of the
            curvature around each point of shape
            `(minibatch, num_point, 3, 3)`.
            The principal directions are stored in columns of the output.
            E.g. `local_coord_frames[i, j, :, 0]` is the normal of
            `j`-th point in the `i`-th pointcloud.
            If `pointclouds` are of `Pointclouds` class, returns a padded
            tensor.
    Raises:
        ValueError: If the points are not 3-dimensional, or if any cloud
            contains fewer points than `neighborhood_size`.
    References:
        [1] Tombari, Salti, Di Stefano: Unique Signatures of Histograms for
        Local Surface Description, ECCV 2010.
    """
    points_padded, num_points = convert_pointclouds_to_tensor(pointclouds)
    ba, N, dim = points_padded.shape
    if dim != 3:
        raise ValueError(
            "The pointclouds argument has to be of shape (minibatch, N, 3)"
        )
    if (num_points <= neighborhood_size).any():
        # BUGFIX: the original message had the inequality reversed; the check
        # above requires every cloud to have MORE than neighborhood_size points.
        raise ValueError(
            "The neighborhood_size argument has to be"
            + " < size of each of the point clouds."
        )
    # undo global mean for stability
    # TODO: replace with tutil.wmean once landed
    pcl_mean = points_padded.sum(1) / num_points[:, None]
    points_centered = points_padded - pcl_mean[:, None, :]
    # get the per-point covariance and nearest neighbors used to compute it
    cov, knns = get_point_covariances(points_centered, num_points, neighborhood_size)
    # get the local coord frames as principal directions of
    # the per-point covariance
    # this is done with torch.symeig / torch.linalg.eigh, which returns the
    # eigenvectors (=principal directions) in an ascending order of their
    # corresponding eigenvalues, and the smallest eigenvalue's eigenvector
    # corresponds to the normal direction; or with a custom equivalent.
    if use_symeig_workaround:
        curvatures, local_coord_frames = symeig3x3(cov, eigenvectors=True)
    else:
        curvatures, local_coord_frames = torch.linalg.eigh(cov)
    # disambiguate the directions of individual principal vectors
    if disambiguate_directions:
        # disambiguate normal
        n = _disambiguate_vector_directions(
            points_centered, knns, local_coord_frames[:, :, :, 0]
        )
        # disambiguate the main curvature
        z = _disambiguate_vector_directions(
            points_centered, knns, local_coord_frames[:, :, :, 2]
        )
        # the secondary curvature is just a cross between n and z
        y = torch.cross(n, z, dim=2)
        # cat to form the set of principal directions
        local_coord_frames = torch.stack((n, y, z), dim=3)
    return curvatures, local_coord_frames
def _disambiguate_vector_directions(pcl, knns, vecs: torch.Tensor) -> torch.Tensor:
"""
Disambiguates normal directions according to [1].
References:
[1] Tombari, Salti, Di Stefano: Unique Signatures of Histograms for
Local Surface Description, ECCV 2010.
"""
# parse out K from the shape of knns
K = knns.shape[2]
# the difference between the mean of each neighborhood and
# each element of the neighborhood
df = knns - pcl[:, :, None]
# projection of the difference on the principal direction
proj = (vecs[:, :, None] * df).sum(3)
# check how many projections are positive
n_pos = (proj > 0).type_as(knns).sum(2, keepdim=True)
# flip the principal directions where number of positive correlations
flip = (n_pos < (0.5 * K)).type_as(knns)
vecs = (1.0 - 2.0 * flip) * vecs
return vecs |
298,598 | add file | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# File: "$Id$"
#
# Copyright (C) 2018 European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Refine the wavelength from a set of images taken with various geometries.
For numerical efficiency, the wavelength has to be in ANGSTROM
Usage:
$ refine_wavelength -w=1.54 poni1 poni2 poni3 poni4
"""
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "29/07/2016"
import sys, os, logging
import numpy
from numpy import sin, arcsin
from scipy.optimize import fmin_slsqp
from pyFAI.geometryRefinement import GeometryRefinement
from pyFAI.third_party import six
class RefineWavelength(object):
def __init__(self, wavelength, listFiles=None):
self.wavelength_init = wavelength
self.wavelength = wavelength
self.listFiles = []
self.listPoni = []
self.listPoints = []
self.listRefiner = []
self.bounds = []
self.param = []
if listFiles:
for oneFile in listFiles:
self.METHOD_NAME(oneFile)
self.param.append(wavelength)
self.bounds.append((0.9 * wavelength, 1.1 * wavelength))
self.nbImg = len(self.listPoni)
def __repr__(self):
lstTxt = [ "%s\t%6.3f\t%6.3f\t%6.3f\t%6.3f\t%6.3f\t%6.3f" % (j, i.dist, i.poni1, i.poni2, i.rot1, i.rot2, i.rot3) for i, j in zip(self.listRefiner, self.listFiles)]
lstTxt.append("Wavelength: %s" % self.wavelength)
return os.linesep.join(lstTxt)
def METHOD_NAME(self, filename):
if filename in self.listFiles:
return
basename = os.path.splitext(filename)[0]
if filename.endswith(".poni") and os.path.isfile(basename + ".py"):
poni = filename
pointFile = basename + ".py"
elif filename.endswith(".py") and os.path.isfile(basename + ".poni"):
poni = filename
pointFile = basename + ".py"
try:
points = eval(open(pointFile).read().replace("data=", ""))
except:
logging.error("error in reading %s" % pointFile)
return
refinement = GeometryRefinement(points)
refinement.load(poni)
# print refinement
self.listPoints.append(numpy.array(points).astype("float64"))
self.listPoni.append(refinement.param)
refinement.dist_min = 0.5 * refinement.dist
refinement.dist_max = 2 * refinement.dist
refinement.poni1_min = min(0.9 * refinement.poni1, 1.1 * refinement.poni1)
refinement.poni1_max = max(0.9 * refinement.poni1, 1.1 * refinement.poni1)
refinement.poni2_min = min(0.9 * refinement.poni2, 1.1 * refinement.poni2)
refinement.poni2_max = max(0.9 * refinement.poni2, 1.1 * refinement.poni2)
refinement.rot1_min = min(0.9 * refinement.rot1, 1.1 * refinement.rot1)
refinement.rot1_max = max(0.9 * refinement.rot1, 1.1 * refinement.rot1)
refinement.rot2_min = min(0.9 * refinement.rot2, 1.1 * refinement.rot2)
refinement.rot2_max = max(0.9 * refinement.rot2, 1.1 * refinement.rot2)
refinement.rot3_min = min(0.9 * refinement.rot3, 1.1 * refinement.rot3)
refinement.rot3_max = max(0.9 * refinement.rot3, 1.1 * refinement.rot3)
self.listRefiner.append(refinement)
self.listFiles.append(poni)
# self.listFiles.append(pointFile)
self.param += refinement.param
self.bounds += [(refinement.dist_min, refinement.dist_max),
(refinement.poni1_min, refinement.poni1_max),
(refinement.poni2_min, refinement.poni2_max),
(refinement.rot1_min, refinement.rot1_max),
(refinement.rot2_min, refinement.rot2_max),
(refinement.rot3_min, refinement.rot3_max)]
def residu1(self, param):
res = []
for i, ref in enumerate(self.listRefiner):
points = self.listPoints[i]
d1 = points[:, 0]
d2 = points[:, 1]
tth = 2 * arcsin(param[-1] * sin(points[:, 2] / 2.0) / self.wavelength_init)
res.append(ref.tth(d1, d2, param[i * 6: (i + 1) * 6]) - tth)
return numpy.concatenate(tuple(res))
def residu2(self, param):
return (self.residu1(param) ** 2).sum()
def chi2(self, param=None):
if param is not None:
return self.residu2(param)
else:
return self.residu2(self.param)
# sum = 0.0
# for oneRef in self.listRefiner:
# sum += oneRef.chi2()
# return sum
def refine2(self, maxiter=1000):
param = []
for i in self.listRefiner:
param.append(i.param)
param.append([self.wavelength])
self.param = numpy.concatenate(tuple(param)).astype("float64")
newParam = fmin_slsqp(self.residu2, self.param, iter=maxiter,
bounds=self.bounds, iprint=2,
acc=1.0e-12)
print newParam
print "Constrained Least square", self.chi2(), "--> ", self.chi2(newParam)
if self.chi2(newParam) < self.chi2():
i = abs(self.param - newParam).argmax()
print "maxdelta on: ", i, self.param[i], "-->", newParam[i]
self.param = newParam
self.wavelength = newParam[-1]
for i, ref in enumerate(self.listRefiner):
ref.param = newParam[6 * i:6 * (i + 1) ]
ref.dist = newParam[6 * i]
ref.poni1 = newParam[6 * i + 1]
ref.poni2 = newParam[6 * i + 2]
ref.rot1 = newParam[6 * i + 3]
ref.rot2 = newParam[6 * i + 4]
ref.rot3 = newParam[6 * i + 5]
if __name__ == "__main__":
if len(sys.argv) == 1:
print __doc__
sys.exit(1)
wavelength = None
listFiles = []
for arg in sys.argv[1:]:
if arg.find("-h") in [0, 1]:
print(__doc__)
sys.exit(0)
elif arg.find("-w=") in [0, 1]:
wavelength = float(arg.split("=")[1])
elif os.path.isfile(arg):
listFiles.append(arg)
if wavelength is None:
wavelength = six.moves.input("WaveLength (A) ? ")
refinement = RefineWavelength(wavelength, listFiles)
print refinement
last = refinement.chi2() + 1
# print last - 1
while refinement.chi2() < last:
last = refinement.chi2()
refinement.refine2()
print refinement |
298,599 | authorize otp | import string
import time
from threading import Lock
from typing import Sequence, Tuple
from flask_security import UserDatastore
from common.types import OTP, Token
from common.utils.code_utils import secure_generate_random_string
from monkey_island.cc.event_queue import IIslandEventQueue, IslandEventTopic
from monkey_island.cc.repositories import UnknownRecordError
from monkey_island.cc.server_utils.encryption import ILockableEncryptor
from . import AccountRole
from .i_otp_repository import IOTPRepository
from .user import User
OTP_EXPIRATION_TIME = 2 * 60 # 2 minutes
class AuthenticationFacade:
    """
    A service for user authentication
    """
    def __init__(
        self,
        repository_encryptor: ILockableEncryptor,
        island_event_queue: IIslandEventQueue,
        user_datastore: UserDatastore,
        otp_repository: IOTPRepository,
        token_ttl_sec: int,
    ):
        """
        :param repository_encryptor: Encryptor protecting the island's repositories
        :param island_event_queue: Queue used to publish island-wide events
        :param user_datastore: Flask-Security datastore holding user records
        :param otp_repository: Storage for one-time passwords
        :param token_ttl_sec: Lifetime of authentication tokens, in seconds
        """
        self._repository_encryptor = repository_encryptor
        self._island_event_queue = island_event_queue
        self._datastore = user_datastore
        self._otp_repository = otp_repository
        self._token_ttl_sec = token_ttl_sec
        # Serializes OTP authorization so an OTP cannot be spent twice
        self._otp_read_lock = Lock()
        # Serializes user removal / token refresh
        self._user_lock = Lock()
    @property
    def token_ttl_sec(self) -> int:
        # Lifetime of freshly issued authentication tokens, in seconds
        return self._token_ttl_sec
    def needs_registration(self) -> bool:
        """
        Checks if a user is already registered on the Island
        :return: Whether registration is required on the Island
        """
        island_api_user_role = self._datastore.find_or_create_role(
            name=AccountRole.ISLAND_INTERFACE.name
        )
        # Registration is needed when no user holds the island-interface role
        return not self._datastore.find_user(roles=[island_api_user_role])
    def remove_user(self, username: str):
        """
        Unregisters a user, removing all tokens in the process
        Idempotent. Will not do anything if the user does not exist.
        :param username: Username of the user to unregister
        """
        with self._user_lock:
            user = self._datastore.find_user(username=username)
            if user is not None:
                self.revoke_all_tokens_for_user(user)
                self._datastore.delete_user(user)
    def revoke_all_tokens_for_user(self, user: User):
        """
        Revokes all tokens for a specific user
        """
        # Changing the uniquifier invalidates all previously issued tokens
        self._datastore.set_uniquifier(user)
    def revoke_all_tokens_for_all_users(self):
        """
        Revokes all tokens for all users
        """
        for user in User.objects:
            self.revoke_all_tokens_for_user(user)
    def generate_otp(self) -> OTP:
        """
        Generates a new OTP
        The generated OTP is saved to the `IOTPRepository`
        """
        otp = OTP(secure_generate_random_string(32, string.ascii_letters + string.digits + "._-"))
        # monotonic clock: expiration is immune to wall-clock adjustments
        expiration_time = time.monotonic() + OTP_EXPIRATION_TIME
        self._otp_repository.insert_otp(otp, expiration_time)
        return otp
    def refresh_user_token(self, user: User) -> Tuple[Token, int]:
        """
        Refreshes the user's authentication token
        :param user: The user to refresh the token for
        :return: The new token and the time when it will expire (in Unix time)
        """
        with self._user_lock:
            # Old tokens are revoked before a new one is handed out
            self.revoke_all_tokens_for_user(user)
            return Token(user.get_auth_token()), self._token_ttl_sec
    def METHOD_NAME(self, otp: OTP) -> bool:
        """
        Authorizes (and consumes) a one-time password
        :param otp: The OTP to check
        :return: True if the OTP exists, was unused, and has not expired;
            False otherwise
        """
        # SECURITY: This method must not run concurrently, otherwise there could be TOCTOU errors,
        # resulting in an OTP being used twice.
        with self._otp_read_lock:
            try:
                otp_is_used = self._otp_repository.otp_is_used(otp)
                # When this method is called, that constitutes the OTP being "used".
                # Set it as used ASAP.
                self._otp_repository.set_used(otp)
                if otp_is_used:
                    return False
                if not self._otp_ttl_elapsed(otp):
                    return True
                return False
            except UnknownRecordError:
                # Unknown OTPs are simply rejected
                return False
    def _otp_ttl_elapsed(self, otp: OTP) -> bool:
        # True when the OTP's stored expiration time is in the past
        return self._otp_repository.get_expiration(otp) < time.monotonic()
    def revoke_all_otps(self):
        """Invalidates every outstanding OTP."""
        self._otp_repository.reset()
    def create_user(
        self, username: str, password: str, roles: Sequence[str], email: str = "dummy@dummy.com"
    ) -> User:
        """Creates a new user record in the datastore and returns it."""
        return self._datastore.create_user(
            username=username,
            password=password,
            roles=roles,
            email=email,
        )
    def handle_successful_registration(self, username: str, password: str):
        """Wipes island state and re-keys the repository encryptor after registration."""
        self._reset_island_data()
        self._reset_repository_encryptor(username, password)
    def _reset_island_data(self):
        """
        Resets the island
        """
        self._island_event_queue.publish(IslandEventTopic.CLEAR_SIMULATION_DATA)
        self._island_event_queue.publish(IslandEventTopic.RESET_AGENT_CONFIGURATION)
    def _reset_repository_encryptor(self, username: str, password: str):
        # Generate a fresh key, then unlock with the credential-derived secret
        secret = _get_secret_from_credentials(username, password)
        self._repository_encryptor.reset_key()
        self._repository_encryptor.unlock(secret.encode())
    def handle_successful_login(self, username: str, password: str):
        """Unlocks the repository encryptor with the user's credentials."""
        self._unlock_repository_encryptor(username, password)
    def _unlock_repository_encryptor(self, username: str, password: str):
        secret = _get_secret_from_credentials(username, password)
        self._repository_encryptor.unlock(secret.encode())
def _get_secret_from_credentials(username: str, password: str) -> str:
return f"{username}:{password}" |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.